Example #1
def untargeted_detection(model, 
                         img, 
                         dataset, 
                         lr, 
                         u_radius, 
                         cap=1000,
                         margin=20,
                         use_margin=False):
    model.eval()
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    optimizer_s = optim.SGD([x_var], lr=lr)
    counter = 0
    while model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item() == true_label:
        optimizer_s.zero_grad()
        output = model(transform(x_var, dataset=dataset))
        if use_margin:
            _, top2_1 = output.data.cpu().topk(2)
            argmax11 = top2_1[0][0]
            if argmax11 == true_label:
                argmax11 = top2_1[0][1]
            loss = (output[0][true_label] - output[0][argmax11] + margin).clamp(min=0)
        else:
            loss = -F.cross_entropy(output, torch.LongTensor([true_label]).cuda())
        loss.backward()

        x_var.data = torch.clamp(x_var - lr * x_var.grad.data, min=0, max=1)
        x_var.data = torch.clamp(x_var - img, min=-u_radius, max=u_radius) + img
        counter += 1
        if counter >= cap:
            break
    return counter
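A minimal usage sketch for the routine above, assuming the project's own `transform` normalization helper, a CUDA model, and a `[1, C, H, W]` image tensor scaled to [0, 1]; the concrete argument values are illustrative only:

# Hypothetical call; model, img and transform come from the surrounding project.
steps = untargeted_detection(model, img, dataset='cifar10',
                             lr=0.005, u_radius=8.0 / 255, cap=1000)
print('label flipped after %d gradient steps (cap = 1000)' % steps)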
Example #2
def L4_function(model,
                img,
                dataset,
                allstep,
                lr,
                u_radius,
                margin=20,
                use_margin=False):
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(
        1, keepdim=True)[1][0].item()
    optimizer_s = optim.SGD([x_var], lr=lr)
    with torch.enable_grad():
        for step in range(allstep):
            optimizer_s.zero_grad()
            output = model(transform(x_var, dataset=dataset))
            if use_margin:
                _, top2_1 = output.data.cpu().topk(2)
                argmax11 = top2_1[0][0]
                if argmax11 == true_label:
                    argmax11 = top2_1[0][1]
                loss = (output[0][true_label] - output[0][argmax11] +
                        margin).clamp(min=0)
            else:
                loss = -F.cross_entropy(output,
                                        torch.LongTensor([true_label]).cuda())
            loss.backward()
            x_var.data = torch.clamp(x_var - lr * x_var.grad.data,
                                     min=0,
                                     max=1)
            x_var.data = torch.clamp(x_var - img, min=-u_radius,
                                     max=u_radius) + img
    return x_var
Example #3
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """

        identity_idx = np.mod(index, 10)
        a_imgs, b_imgs = self.input_a[identity_idx], self.input_b[identity_idx]

        if self.train:
            a_img = random.choice(a_imgs)
            b_img = random.choice(b_imgs)
        else:
            a_img = a_imgs[index]
            b_img = b_imgs[index]

        a_img = transform(a_img, resize=32)
        b_img = transform(b_img, resize=32)

        # svhn_img = np.transpose(svhn_img, (1, 2, 0))
        # svhn_img = Image.fromarray(svhn_img, mode='RGB')
        # svhn_img = self.transformB(svhn_img)

        return a_img, b_img, index
Example #4
 def get_original_calculated_distance(self):
     if all([self.point, self.orig_point]):
         return transform(4269, 3857, self.orig_point).distance(
             transform(4269, 3857, self.point)) * math.cos(
                 self.point.y * math.pi / 180.0)
     else:
         return 'Unable to calculate: (orig: %s, calc: %s)' % (
             self.orig_point, self.point)
Example #5
 def __odom_update(self, data):
     self.position, self.orientation = utils.unwrap_pose(data.pose.pose)
     if len(self.path) > 1:
         self.path = collections.deque(utils.transform(
             LineString(self.path), self.position, self.orientation).coords)
         self.current_goal_point = utils.transform(self.current_goal_point, self.position, self.orientation)
     
     if self.current_goal_point.x < REACH_DIST and self.current_goal_point.y < REACH_DIST:
         return  # TODO: Add completion mech
Example #6
 def __getitem__(self, item):
     ref_cloud = self.data[item, ...]
     R, t = self.Rs[item], self.ts[item]
     ref_cloud = random_select_points(ref_cloud, m=self.npts)
     src_cloud_points = transform(ref_cloud[:, :3], R, t)
     src_cloud_normal = transform(ref_cloud[:, 3:], R)
     src_cloud = np.concatenate([src_cloud_points, src_cloud_normal],
                                axis=-1)
     if self.train:
         ref_cloud[:, :3] = jitter_point_cloud(ref_cloud[:, :3])
         src_cloud[:, :3] = jitter_point_cloud(src_cloud[:, :3])
     if not self.normal:
         ref_cloud, src_cloud = ref_cloud[:, :3], src_cloud[:, :3]
     return ref_cloud, src_cloud, R, t
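`transform(points, R, t)` above is assumed to apply a rigid transform to an (N, 3) array, with rotation only when `t` is omitted (as done for the normals); a minimal NumPy sketch consistent with that reading, not the project's actual helper:

import numpy as np

def transform(points, R, t=None):
    # Rotate an (N, 3) array of points or normals; translate only when t is given.
    out = points @ R.T
    if t is not None:
        out = out + t
    return out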
Example #7
 def original_point(self):
     try:
         x, y, epsg_in = float(self.rec.coo.easting), float(
             self.rec.coo.northing), int(self.rec.coo.epsg_code)
         return transform(epsg_in, 4269, Point(x, y))
     except:
         return Point(0, 0)
Example #8
    def define_location_quality(self):
        """
        parameters:
            self : we use self.point and check it against the dictionary of shape_corners
            shapes = {'name1': APIshape, name2: Abstract/Section, name3: qq, ...}
        """

        # if we were unable to calculate a point
        if not self.point:
            return 0

        score = self.location_quality
        for name, shape in self.reference_shapes.iteritems():
            try:
                shape = ensure_polygon(shape)

                if shape.intersection(transform(4269, 3857, self.point)):
                    score += math.pow(
                        area(shape, units='square miles'),
                        -1)  # if within a square mile => score = 1
                    # print '\tpoint inside the shape: %s, area: %.3f square miles' % (name, area(shape, units='square miles'))
                else:
                    pass
                    # print '\tpoint outside the shape: %s' % name
            except:
                print '\tinvalid geometry for %s' % name

        # reset score if the point does not fall within any of the related shapes
        if score == self.location_quality:
            score = -1

        self.location_quality = score
Example #9
    def __getitem__(self, i):
        # Read image
        image = Image.open(self.images[i], mode='r')

        if image.size[0] > 1000:
            basewidth = 500
            wpercent = (basewidth / float(image.size[0]))
            hsize = int((float(image.size[1]) * float(wpercent)))
            image = image.resize((basewidth, hsize), Image.ANTIALIAS)

        #print(i, image.size)

        image = image.convert('RGB')

        # Read objects in this image (bounding boxes, labels, difficulties)
        objects = self.objects[i]
        boxes = torch.FloatTensor(objects['boxes'])  # (n_objects, 4)
        labels = torch.LongTensor(objects['labels'])  # (n_objects)

        # Apply transformations
        image, boxes, labels = transform(image,
                                         boxes,
                                         labels,
                                         split=self.split)

        return image, boxes, labels
Example #10
    def __init__(self, root, train=True):
        self.root = os.path.expanduser(root)
        self.transform = transform()
        self.train = train  # training set or test set

        self.input_a, self.input_b = make_dataset_fixed(self.train)
        print(self.input_a.shape[0])
Example #11
    def __getitem__(self, i):
        # Read image
        
        image = Image.open(self.images[i], mode='r')
        image = image.convert('RGB')        
        # Read objects in this image (bounding boxes, labels, difficulties)
        objects = self.objects[i]
        #print(objects)
        boxes = torch.FloatTensor(objects['boxes'])  # (n_objects, 4)
        #print(boxes)
        labels = torch.LongTensor(objects['labels'])  # (n_objects)
        #print(labels)
        difficulties = torch.ByteTensor(objects['difficulties'])  # (n_objects)
        #============draw pic for exam===========
        '''image_o=copy.deepcopy(image)
        coors=boxes
        for i in range(len(coors)):
            coors_set1=[(coors[i][0],coors[i][1]),(coors[i][2],coors[i][1]),(coors[i][2],coors[i][3]),(coors[i][0],coors[i][3]),(coors[i][0],coors[i][1])]
            draw = ImageDraw.Draw(image_o)
            draw.line(coors_set1,width=10,fill='red')
        image_o.show()'''
        #============draw pic for exam===========
        # Discard difficult objects, if desired
        if not self.keep_difficult:
            boxes = boxes[1 - difficulties]
            labels = labels[1 - difficulties]
            difficulties = difficulties[1 - difficulties]

        # Apply transformations
        image, boxes, labels, difficulties = transform(image, boxes, labels, self.input_size, difficulties, split=self.split)        
        return image, boxes, labels, difficulties
Example #12
    def __getitem__(self, i):
        # Read image
        image = Image.open(self.images[i], mode="r")
        image = image.convert("RGB")

        # Read objects in this image (bounding boxes, labels, difficulties)
        objects = self.objects[i]
        boxes = torch.FloatTensor(objects["boxes"])  # (n_objects, 4)
        labels = torch.LongTensor(objects["labels"])  # (n_objects)
        difficulties = torch.ByteTensor(objects["difficulties"])  # (n_objects)

        # Discard difficult objects, if desired
        if not self.keep_difficult:
            boxes = boxes[1 - difficulties]
            labels = labels[1 - difficulties]
            difficulties = difficulties[1 - difficulties]

        # Apply transformations
        image, boxes, labels, difficulties = transform(image,
                                                       boxes,
                                                       labels,
                                                       difficulties,
                                                       split=self.split)

        return image, boxes, labels, difficulties
Example #13
File: test01.py Project: 12345fengce/AI_CV
    def r(self):
        """transform out of tensor to numpy
            filter with confidence
            calculate coordinates
            filter with NMS
            crop image from original image for ONet's input
            draw"""
        start_time = time.time()
        data, prior = self.p()
        with torch.no_grad():
            confi, offset = self.rnet(data.cuda())
        confi = confi.cpu().numpy().flatten()
        offset = offset.cpu().numpy()

        offset, prior, confi = offset[confi >= 0.99], prior[confi >= 0.99], confi[confi >= 0.99]

        offset, landmarks = offset[:, :4], offset[:, 4:]
        offset, landmarks = utils.transform(offset, landmarks, prior)

        boxes = np.hstack((offset, np.expand_dims(confi, axis=1), landmarks))
        boxes = utils.NMS(boxes, threshold=0.6, ismin=False)

        o_data, o_prior = utils.crop_to_square(boxes[:, :5], 48, self.image)

        o_prior = np.stack(o_prior, axis=0)
        o_data = torch.stack(o_data, dim=0)
        end_time = time.time()
        print("RNet create {} candidate items\ncost {}s!".format(o_data.size(0), end_time - start_time))
        utils.draw(boxes, self.test_img, "RNet")
        return o_data, o_prior
Example #14
File: test02.py Project: 12345fengce/AI_CV
    def o(self):
        """transform out of tensor to numpy
            filter with confidence
            calculate coordinates
            filter with NMS
            draw"""
        start_time = datetime.datetime.now()
        data, prior = self.r()
        with torch.no_grad():
            confi, offset = self.onet(data.cuda())
        confi = confi.cpu().numpy().flatten()
        offset = offset.cpu().numpy()

        offset, prior, confi = offset[confi >= 0.999], prior[
            confi >= 0.999], confi[confi >= 0.999]

        offset, landmarks = offset[:, :4], offset[:, 4:]
        offset, landmarks = utils.transform(offset, landmarks, prior)

        boxes = np.hstack((offset, np.expand_dims(confi, axis=1), landmarks))  # combine offsets and confidence before NMS
        boxes = utils.NMS(boxes, threshold=0.4, ismin=True)
        end_time = datetime.datetime.now()
        print("ONet cost {}ms".format(
            (end_time - start_time).microseconds / 1000))
        return boxes
Example #15
    def define_location_quality(self):

        """
        parameters:
            self : we use self.point and check it against the dictionary of shape_corners
            shapes = {'name1': APIshape, name2: Abstract/Section, name3: qq, ...}
        """

        # if we were unable to calculate a point
        if not self.point:
            return 0

        score = self.location_quality
        for name, shape in self.reference_shapes.iteritems():
            try:
                shape = ensure_polygon(shape)

                if shape.intersection(transform(4269, 3857, self.point)): 
                    score += math.pow(area(shape, units='square miles'), -1)  # if within a square mile => score = 1
                    # print '\tpoint inside the shape: %s, area: %.3f square miles' % (name, area(shape, units='square miles'))
                else:
                    pass
                    # print '\tpoint outside the shape: %s' % name
            except:
                print '\tinvalid geometry for %s' % name

        # reset score if the point does not fall within any of the related shapes
        if score == self.location_quality:
            score = -1

        self.location_quality = score
Example #16
    def __getitem__(self, index: int):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is a dictionary of the XML tree.
        """
        trans = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        img = Image.open(self.images[index]).convert('RGB')
        target = self.parse_voc_xml(
            ET.parse(self.annotations[index]).getroot())

        img = trans(img)

        bndboxes, clses, difficulties = self.transform_annotation_to_bbox(
            target)

        image, bboxes, labels, difficulties = transform(
            img, bndboxes, clses, difficulties,
            "TRAIN" if self.train else "TEST")

        return image, bboxes, labels, difficulties
Example #17
    def __getitem__(self, i):
        # Read image
        image = Image.open(self.images[i], mode='r')
        image = image.convert('RGB')

        # Read objects in this image (bounding boxes, labels, difficulties)
        objects = self.objects[i]
        boxes = []
        labels = []
        for obj in objects:
            boxes.append(obj['points'])
            labels.append(obj['label'])
        boxes = torch.FloatTensor(boxes)  # (n_objects, 4)
        labels = torch.LongTensor(labels)  # (n_objects)
        #difficulties = torch.ByteTensor(objects['difficulties'])  # (n_objects)

        # Discard difficult objects, if desired
        '''
        if not self.keep_difficult:
            boxes = boxes[1 - difficulties]
            labels = labels[1 - difficulties]
            difficulties = difficulties[1 - difficulties]
        '''
        # Apply transformations
        image, boxes, labels = transform(image, boxes, labels, split=self.split)

        return image, boxes, labels
Example #18
 def _create_dataloader(self, dataset_path: str):
     dataset = RoomDataset(dataset_path, transform())
     dataloader = torch.utils.data.DataLoader(dataset,
                                              batch_size=self.batch_size,
                                              num_workers=0,
                                              shuffle=True)
     return dataloader
Example #19
    def __getitem__(self, i):
        # Read image
        item_i = self.json_file[i]
        image = Image.open(self.data_folder + '/images/' + item_i['image'],
                           mode='r')
        image = image.convert('RGB')

        # Read objects in this image (bounding boxes, labels, difficulties)
        # objects = self.objects[i]
        center = item_i['center']
        scale = item_i['scale']
        bbox = [
            center[0] - 100 * scale, center[1] - 100 * scale,
            center[0] + 100 * scale, center[1] + 100 * scale
        ]
        boxes = torch.FloatTensor(bbox)  # (n_objects, 4)
        labels = torch.LongTensor([1])  # (n_objects)
        difficulties = torch.ByteTensor([0])  # (n_objects)

        # Discard difficult objects, if desired
        # if not self.keep_difficult:
        #     boxes = boxes[1 - difficulties]
        #     labels = labels[1 - difficulties]
        #     difficulties = difficulties[1 - difficulties]

        # Apply transformations
        image, boxes, labels, difficulties = transform(image,
                                                       boxes,
                                                       labels,
                                                       difficulties,
                                                       split=self.split)

        return image, boxes, labels, difficulties
Example #20
def separate(mixture, model, params, device):
    labels = ['s%d' % i for i in range(1, params['num_attractors'] + 1)]
    estimates = {}

    mix = mixture
    if (len(mix.shape) > 1):
        mix = mixture[:, 0]
    _, mix = utils.mask_mixture(1, mix, params['n_fft'], params['hop_length'])
    log_spec = utils.transform(mix, params['n_fft'], params['hop_length'])
    silence_mask = log_spec > -25
    log_spec = utils.whiten(log_spec)

    with torch.no_grad():
        input_data = torch.from_numpy(log_spec).unsqueeze(0).requires_grad_().to(device)
        if 'DeepAttractor' in str(model):
            with torch.no_grad():
                masks, _, embedding, _ = model(input_data, one_hots=None)

            clusterer = KMeans(n_clusters=params['num_attractors'])
            embedding_ = embedding.squeeze(0).cpu().data.numpy()
            clusterer.fit(embedding_[silence_mask.flatten()])
            assignments = clusterer.predict(embedding_)
            assignments = assignments.reshape((masks.shape[1], masks.shape[2]))

    for i, label in enumerate(labels):
        mask = (assignments == i).T.astype(float)
        source, mix = utils.mask_mixture(mask, mix, params['n_fft'], params['hop_length'])
        estimates[label] = source

    return estimates
Example #21
 def _generate_reference(self, aa):
     self.reference = {}
     structure = generateAA(aa)
     structure = transform(structure)
     self.angles = getAngles(structure)
     for atom in structure.get_atoms():
         self.reference[atom.get_name()] = np.array(atom.get_coord())
Example #22
def L3_function(model,
                img,
                target_lable,
                dataset,
                allstep,
                lr,
                s_radius,
                margin=20,
                use_margin=False):
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    optimizer_s = optim.SGD([x_var], lr=lr)
    with torch.enable_grad():
        for step in range(allstep):
            optimizer_s.zero_grad()
            output = model(transform(x_var, dataset=dataset))
            if use_margin:
                target_l = target_lable[0].item()
                _, top2_1 = output.data.cpu().topk(2)
                argmax11 = top2_1[0][0]
                if argmax11 == target_l:
                    argmax11 = top2_1[0][1]
                loss = (output[0][argmax11] - output[0][target_l] +
                        margin).clamp(min=0)
            else:
                loss = F.cross_entropy(output, target_lable)
            loss.backward()
            x_var.data = torch.clamp(x_var - lr * x_var.grad.data,
                                     min=0,
                                     max=1)
            x_var.data = torch.clamp(x_var - img, min=-s_radius,
                                     max=s_radius) + img
    return x_var
Example #23
    def segment_images_iter(self):
        images = {}
        audios = {}
        counter_images = 0
        for batch_id, (image_input, audio_input, _, nframes, path, image_raw) in enumerate(self.dataloader):

            v_init = self.z[int(path[0])]
            z_img = torch.FloatTensor(audio_input.size(0), v_init.shape[0])

            for k in range(audio_input.size(0)):
                z_img[k, :] = self.z[int(path[k])]

            image_input = self.generator.generate_images(z_img, intervention=None)
            image_input = utils.transform(image_input)

            audio_input = audio_input.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+

            model_output = self.model(image_input, audio_input, [])
            image_output = model_output[0]
            audio_output = model_output[1]

            pooling_ratio = round(audio_input.size(3) / audio_output.size(3))
            nframes = nframes.div(pooling_ratio)

            # Compute matchmap to detect where there are important concepts that we want to cluster (this time in image)
            for i in range(image_input.shape[0]):
                nF = nframes[i]
                matchmap_i = utils.compute_matchmap(image_output[i], audio_output[i][:, :, 0:nF])
                matchmap_i_mean = matchmap_i.mean(2).view(-1)
                indexes = np.where(matchmap_i_mean > 0.9 * matchmap_i_mean.max())[0]
                features_im = image_output[i].view(image_output.shape[1], -1)[..., indexes].cpu().numpy()

                product = np.matmul(self.centroids, features_im)

                # For each selected superpixel in the image, find top 5 concepts
                seg_image = {}
                for j, index in enumerate(indexes):
                    clust = np.argsort(-product[:, j])[:5]
                    seg_image[index] = clust
                images[path[i]] = seg_image

                # Also for the audio, for testing purposes
                matchmap_i_max = matchmap_i.max(1)[0].max(0)[0]
                indexes = np.where(matchmap_i_max > 0.9 * matchmap_i_max.max())[0]
                features_au = audio_output[i].view(audio_output.shape[1], -1)[..., indexes].cpu().numpy()

                product = np.matmul(self.centroids, features_au)

                # For each selected superpixel in the image, find top 5 concepts
                seg_audio = {}
                for j, index in enumerate(indexes):
                    clust = np.argsort(-product[:, j])[:5]
                    seg_audio[index + 20] = clust
                audios[path[i]] = seg_audio

                counter_images += 1
                if counter_images >= self.num_images_segment:
                    return images, audios

        return images, audios
Example #24
    def __getitem__(self, i):
        # Read image
        image = Image.open(self.images[i], mode='r')
        image = image.convert('RGB')

        # Read objects in this image (bounding boxes, labels, difficulties)
        objects = self.objects[i]
        boxes = torch.FloatTensor(objects['boxes'])  # (n_objects, 4)

        if boxes.size()[0] == 0:
            return None

        labels = torch.LongTensor(objects['labels'])  # (n_objects)
        difficulties = torch.ByteTensor(objects['difficulties'])  # (n_objects)

        # Discard difficult objects, if desired
        if not self.keep_difficult:
            boxes = boxes[1 - difficulties]
            labels = labels[1 - difficulties]
            difficulties = difficulties[1 - difficulties]

        # Apply transformations
        image, boxes, labels, difficulties = transform(image,
                                                       boxes,
                                                       labels,
                                                       difficulties,
                                                       split=self.split)

        return image, boxes, labels, difficulties
Example #25
    def __getitem__(self, index):
        file = self.database[index]
        imgname = file.split(".")[0]+".jpg"
        imgpath = self.path+"/images/"+imgname
        img = utils.transform(Image.open(imgpath))

        labelfile = self.path+"/labels/"+file
        with open(labelfile, "r") as f:
            labellist = f.readlines()
        label = []
        for labelstr in labellist:
            labelstr = labelstr.replace(" ", ",").replace("\n", "")
            label.append(torch.tensor(eval(labelstr), dtype=torch.int8))
        label = torch.stack(label, dim=0)

        normalize = nn.AdaptiveAvgPool2d((cfg.SIZE[1], cfg.SIZE[0]))
        _, h, w = img.size()
        if w != cfg.SIZE[0] or h != cfg.SIZE[1]:
            if w < h:
                img = img.permute(0, 2, 1)
                label = label.permute(1, 0)
            img = normalize(img)
            label = normalize(label.unsqueeze(dim=0).float())

        return img, label.long().squeeze()
Example #26
 def post(self, id):
     args = parser.parse_args(strict=True)
     short_code = args["custom_short_code"]
     long_url = args["long_url"]
     urlmap = URLMapping.query.filter_by(long_url=long_url).first()
     if urlmap:  # the long_url already exists; ignore any custom short code in this case
         return urlmap.to_json(), 200
     else:  # long_url does not exist yet
         if short_code:  # the user supplied a custom short code
             urlmap = URLMapping.query.filter_by(
                 short_code=short_code).first()
             if urlmap:  # that short code is already taken
                 return {
                     "msg": "short_code {} already exist".format(short_code)
                 }, 202
             else:  # the short code is available
                 um = URLMapping(long_url=long_url,
                                 short_code=short_code,
                                 item_type="user-defined",
                                 id_used=False,
                                 user_id=g.current_user.id)
                 db.session.add(um)
                 db.session.commit()
                 return um.to_json(), 200
         else:  # long_url does not exist and no custom short code was supplied
             custom_um = URLMapping.query.filter_by(id_used=False).first()
             if custom_um:
                 real_short_code = transform(custom_um.id)
                 um = URLMapping(long_url=long_url,
                                 short_code=real_short_code,
                                 id_used=False,
                                 user_id=g.current_user.id)
                 custom_um.id_used = True
                 db.session.add_all([um, custom_um])
                 db.session.commit()
                 return um.to_json(), 200
             else:
                 um = URLMapping(long_url=long_url,
                                 short_code="placeholder",
                                 id_used=True,
                                 user_id=g.current_user.id)
                 db.session.add(um)
                 db.session.commit()
                 um.short_code = transform(um.id)
                 db.session.add(um)
                 db.session.commit()
                 return um.to_json(), 200
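`transform(id)` above is assumed to turn the numeric primary key into a short code; a minimal base62 sketch of that idea (alphabet and ordering are assumptions, not the project's actual scheme):

ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def transform(num):
    # Encode a non-negative integer id as a base62 short code.
    if num == 0:
        return ALPHABET[0]
    code = []
    while num > 0:
        num, rem = divmod(num, 62)
        code.append(ALPHABET[rem])
    return "".join(reversed(code))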
Example #27
 def __getitem__(self, index):
     img_name, *data = self.label[index].split()
     img_path = self.path + "/img/" + img_name
     img = Image.open(img_path)
     img = utils.transform(img)
     data = np.array(data, dtype=np.float64)  # np.float alias was removed in NumPy 1.24
     label = self.__getlabel__(data)
     return img, torch.Tensor(label)
Example #28
    def __getitem__(self, index):
        img_name = self.database[index]
        label = img_name.split("_")[0]
        img = self.path + "/" + label + "/" + img_name

        img = utils.transform(Image.open(img))
        label = torch.tensor(int(label))
        return img, label
Example #29
File: test.py Project: hbwzhsh/NLP-1
def generate(start_word, length):
    parser = argparse.ArgumentParser()
    parser.add_argument("--vocab_file",
                        type=str,
                        default="data/vocab.pkl",
                        help="Vocabulary dictionary")
    parser.add_argument("--vocab_size",
                        type=int,
                        default=2854,
                        help="Vocabulary size")
    parser.add_argument("--embedding_dim",
                        type=int,
                        default=256,
                        help="Dimensionality of the words embedding")
    parser.add_argument("--rnn_size",
                        type=int,
                        default=128,
                        help="Hidden units of rnn layer ")
    parser.add_argument("--num_layers",
                        type=int,
                        default=2,
                        help="Number of rnn layer")
    parser.add_argument("--batch_size",
                        type=int,
                        default=1,
                        help="Minibatch size")
    args, _ = parser.parse_known_args()

    vocab_dict = utils.load_vocab(args.vocab_file)
    index2word = dict(zip(vocab_dict.values(), vocab_dict.keys()))

    text = [start_word]
    text_data = utils.transform(text, vocab_dict)

    checkpoint_dir = os.path.abspath(
        os.path.join(os.path.curdir, "checkpoints"))
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
    graph = tf.Graph()
    with graph.as_default():
        sess = tf.Session()
        with sess.as_default():
            rnn = RNNLM(vocab_size=args.vocab_size,
                        embedding_dim=args.embedding_dim,
                        rnn_size=args.rnn_size,
                        num_layers=args.num_layers,
                        batch_size=args.batch_size,
                        training=False)
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint_file)

            for _ in range(length):
                data = np.array([text_data])
                predictions = sess.run(rnn.prediction,
                                       feed_dict={rnn.input_data: data})
                text_data.append(predictions[-1])

    content = [index2word[index] for index in text_data]
    return "".join(content)
Example #30
 def decode(self, y):
     u = np.dot(self.M, y)
     max_ind = int(np.argmax(np.abs(u)))
     sign_ele = np.sign(u[max_ind])
     binary_str = ("{0:0" + str(self.i + 1) + "b}").format(max_ind)
     symbol_array = utils.transform(
         np.array(list(map(int, list(binary_str)))))
     symbol_array[0] = sign_ele
     return symbol_array
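One plausible reading of `utils.transform` here is a map from the 0/1 bits of the decoded index to bipolar ±1 symbols, after which the detected sign is written into the first position; a one-line sketch under that assumption only:

import numpy as np

def transform(bits):
    # Assumed behaviour: map a 0/1 bit array to -1/+1 symbols.
    return 2 * np.asarray(bits) - 1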
Example #31
def set_in_biplist(build_params, filename, key, value):
	if isinstance(value, str) or isinstance(value, unicode):
		value = pystache.render(value, build_params['app_config'])
	
	found_files = glob.glob(filename)
	for found_file in found_files:
		plist = biplist.readPlist(found_file)
		plist = utils.transform(plist, key, lambda _: value, allow_set=True)
		biplist.writePlist(plist, found_file)
Example #32
    def val_next_batch(self, batch_size):
        batch_indexs = np.random.choice(range(self.val_data.shape[0]), batch_size, replace=False)
        batch_imgs = self.val_data[batch_indexs]

        # resize (32, 32, 3) to (64, 64, 3)
        batch_imgs_ = [utils.transform(scipy.misc.imresize(batch_imgs[idx], (self.image_size[0], self.image_size[1])))
                       for idx in range(batch_imgs.shape[0])]

        return np.asarray(batch_imgs_)
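`utils.transform` in this kind of GAN input pipeline typically rescales uint8 pixels from [0, 255] to [-1, 1]; a sketch under that assumption (the project's helper may also crop or flip):

import numpy as np

def transform(img):
    # Assumed behaviour: map a uint8 image in [0, 255] to float32 in [-1, 1].
    return img.astype(np.float32) / 127.5 - 1.0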
Example #33
def resolve_urls(build, *url_locations):
	'''Include "src" prefix for relative URLs, e.g. ``file.html`` -> ``src/file.html``
	
	``url_locations`` uses::
	
	* dot-notation to descend into a dictionary
	* ``[]`` at the end of a field name to denote an array
	* ``*`` means all attributes on a dictionary
	'''
	def resolve_url_with_uuid(url):
		return utils._resolve_url(build.config, url, 'src')
	for location in url_locations:
		build.config = utils.transform(build.config, location, resolve_url_with_uuid)
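A hypothetical call, using key names invented purely to illustrate the location syntax from the docstring (dot descent, `[]` for arrays, `*` for all keys):

# Rewrite one scalar field, every element of an array, and a field on every module entry.
resolve_urls(build, "main_html", "activations[].scripts[]", "modules.*.icon")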
Example #34
    def assign_centroid(self):

        if self.reference_shapes:

            # order shapes by area
            l = sorted(self.reference_shapes.iteritems(), key=lambda x: x[1].area, reverse=True)

            # the smallest polygon is the last item in the sorted list
            centroid_name, polygon = l[-1]

            self.point = transform(3857, 4269, polygon.centroid)
            
            # assign the location quality score: number of referenced polygons + 5
            self.centroid_assigned = centroid_name
            self.location_quality = -5
Example #35
def set_in_biplist(build, filename, key, value):
    # biplist import must be done here, as in the server context, biplist doesn't exist
    import biplist

    if isinstance(value, str):
        value = utils.render_string(build.config, value)

    build.log.debug("setting {key} to {value} in {files}".format(key=key, value=value, files=filename))

    found_files = glob.glob(filename)
    if len(found_files) == 0:
        build.log.warning('No files were found to match pattern "%s"' % filename)
    for found_file in found_files:
        plist = biplist.readPlist(found_file)
        plist = utils.transform(plist, key, lambda _: value, allow_set=True)
        biplist.writePlist(plist, found_file)
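Both `set_in_biplist` variants rely on `utils.transform(plist, key, fn, allow_set=True)` to apply a function at a key inside the plist dictionary; a greatly simplified sketch of that idea, assuming a dot-separated key path (the real forge helper also understands the `[]` and `*` location syntax shown in Example #33):

def transform(mapping, key, fn, allow_set=False):
    # Walk a dot-separated key path and apply fn to the leaf value.
    # With allow_set=True the leaf is created when it does not exist yet.
    node = mapping
    parts = key.split(".")
    for part in parts[:-1]:
        node = node.setdefault(part, {})
    leaf = parts[-1]
    if leaf in node or allow_set:
        node[leaf] = fn(node.get(leaf))
    return mapping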
Example #36
 def get_original_calculated_distance(self):
     if all([self.point, self.orig_point]):
         return transform(4269, 3857, self.orig_point).distance(transform(4269, 3857, self.point)) * math.cos(self.point.y * math.pi / 180.0)
     else:
         return 'Unable to calculate: (orig: %s, calc: %s)' % (self.orig_point, self.point)
Example #37
 def original_point(self):
     try:
         x, y, epsg_in = float(self.rec.coo.easting), float(self.rec.coo.northing), int(self.rec.coo.epsg_code) 
         return transform(epsg_in, 4269, Point(x,y))
     except:
         return Point(0,0)
Example #38
        # TODO
        # Rename subdirs

        for filename in filenames:
            # For a file /foo/bar/spam.eggs
            # extension        = 'eggs'
            # filepath         = '/foo/bar/spam.eggs'
            # base             = 'spam.eggs'
            # base_without_ext = 'spam'
            extension = os.path.splitext(filename)[1][1:]
            filepath = os.path.join(dirpath, filename)
            base = os.path.basename(filepath)  # No need to store in var
            base_without_ext = os.path.splitext(base)[0]

            if extension in config.transformation:
                new_name = os.path.join(dirpath, base_without_ext) + '.' + utils.transform(extension)
                if config.transformation[extension]['change_name']:
                    new_name = os.path.join(dirpath, utils.get_random_name()) + '.' + utils.transform(extension)

                # First save data for future unhiding
                file_transform[new_name] = filepath
                os.rename(filepath, new_name)
    file_transform['hidden'] = True
    pickle.dump(file_transform, f, pickle.HIGHEST_PROTOCOL)
    f.close()

else:
    f = open('temp', 'rb')
    file_transform = pickle.load(f)
    for key in file_transform:
        if key != 'hidden':
Example #39
File: marc2olac.py Project: olac/olac
# clean up temporary files, if necessary
if os.path.isfile(marcxml_filename):
    for f in splitfiles:
        os.remove(f)
else:
    # remove processing directory, restore original files from backup
    shutil.rmtree(marcxml_filename)
    os.rename(marcxml_filename + '_backup',marcxml_filename)

# generate an HTML file, if appropriate
if options.do_html_output and stage >= 3:
    html_out = config.get('system', 'html_output')

    # rename output file if necessary
    if options.inverse:
        dotindex = html_out.find('.')
        if dotindex != -1:
            html_out = html_out[0:dotindex] + '.inverse' + html_out[dotindex:]
        else:
            html_out += '.inverse'

    print "Generating HTML output to %s" % html_out
    utils.transform(config,libpath + sep + 'repository2html',olacxml_filename,
            projpath + sep + html_out)

if stage < final_stage:
    print "Done."
else:
    print "Done.  OLAC Repository %s generated in %s." % (olacxml_filename, projectname)