Example #1
def get_dataloader(config):
    # Build the image and label transforms for each split (they depend on the random seeding above).
    # Augmentation is applied only when is_train=True, so validation data is left unaugmented.
    transform_img_train, transform_label_train = get_transform(config,
                                                               is_train=True)
    transform_img_val, transform_label_val = get_transform(config,
                                                           is_train=False)

    # Create Dataset from class defined above
    train_set = Dataset(config['root'] + '/train', config['size'],
                        transform_img_train, transform_label_train)
    val_set = Dataset(config['root'] + '/validate', config['size'],
                      transform_img_val, transform_label_val)

    # DataLoader combines a dataset with a sampler (if provided) and yields an iterable over batches
    train_loader = DataLoader(train_set,
                              batch_size=config['batch_size'],
                              shuffle=config['shuffle'],
                              num_workers=0,
                              drop_last=False)
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            shuffle=config['shuffle'],  # note: validation loaders are usually left unshuffled
                            num_workers=0,
                            drop_last=False)
    return train_loader, val_loader
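A minimal usage sketch (hypothetical config values; the keys are just the ones this function reads, and the directory layout follows the 'root/train' and 'root/validate' paths above):

config = {
    'root': 'data/segmentation',  # hypothetical dataset root with train/ and validate/ subfolders
    'size': (256, 256),           # passed through to Dataset
    'batch_size': 8,
    'shuffle': True,              # applied to both loaders in this example
}
train_loader, val_loader = get_dataloader(config)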
Example #2
    def __init__(self,
                 root,
                 split,
                 image_size=(300, 300),
                 keep_difficult=False):
        """
        A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.

        :param root: path to the stored VOC data
        :param split: 'TRAIN' or 'TEST'; the trainval sets from both VOC2007 and VOC2012
            are used for the 'TRAIN' split, and the VOC2007 test set for the 'TEST' split
        :param image_size: (height, width) of the network input
        :param keep_difficult: keep or discard objects flagged as difficult to detect
        """
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}

        self.keep_difficult = keep_difficult
        self.transform = get_transform(image_size, split)

        if split == 'TRAIN':
            self.datasets = [
                VOCDetection(root, year='2007', image_set='trainval'),
                VOCDetection(root, year='2012', image_set='trainval')
            ]
        else:
            self.datasets = [VOCDetection(root, year='2007', image_set='test')]
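The class name is cropped out of this snippet; assuming it is called PascalVOCDataset (a hypothetical name), constructing the two splits might look like:

train_dataset = PascalVOCDataset('data/VOCdevkit', 'train', image_size=(300, 300), keep_difficult=True)
test_dataset = PascalVOCDataset('data/VOCdevkit', 'test')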
Example #3
def eval(net, data_dict, ensemble_num, recalls):
    net.eval()
    data_set = ImageReader(data_dict, get_transform(DATA_NAME, 'test'))
    data_loader = DataLoader(data_set,
                             BATCH_SIZE,
                             shuffle=False,
                             num_workers=8)

    features = []
    with torch.no_grad():
        for inputs, labels in data_loader:
            out = net(inputs.to(DEVICE))
            out = F.normalize(out)
            features.append(out.cpu())
    features = torch.cat(features, 0)
    torch.save(
        features,
        'results/{}_test_features_{:03}.pth'.format(DATA_NAME, ensemble_num))
    # load feature vectors
    features = [
        torch.load('results/{}_test_features_{:03}.pth'.format(DATA_NAME, d))
        for d in range(1, ensemble_num + 1)
    ]
    features = torch.cat(features, 1)
    acc_list = recall(features, data_set.labels, rank=recalls)
    desc = ''
    for index, recall_id in enumerate(recalls):
        desc += 'R@{}:{:.2f}% '.format(recall_id, acc_list[index] * 100)
    print(desc)
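The recall helper is not shown; a minimal sketch of the standard retrieval Recall@K over these features (dot-product similarity, with each query excluded from its own ranking) could look like:

def recall(features, labels, rank=(1, 2, 4, 8)):
    # features: [N, D] tensor (each ensemble block L2-normalized); labels: length-N class ids
    labels = torch.as_tensor(labels)
    sim = features @ features.t()               # pairwise similarity
    sim.fill_diagonal_(float('-inf'))           # a query must not retrieve itself
    _, idx = sim.topk(max(rank), dim=1)         # nearest-neighbour indices
    match = labels[idx] == labels.unsqueeze(1)  # [N, max(rank)] hit matrix
    return [match[:, :k].any(dim=1).float().mean().item() for k in rank]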
Example #4
    def test_au_interp(self, src_img_name):

        src_img = Image.open(os.path.join(self.data_dir, src_img_name))

        transform = utils.get_transform()
        src_img = transform(src_img)
        print(src_img.shape)

        os.makedirs(self.test_interp_path + '/' + self.version + '/',
                    exist_ok=True)

        au_interp = self.FloatTensor(np.linspace(0, 5, 6))

        base_cond = self.FloatTensor(np.zeros(self.au_dim))

        for d in range(self.au_dim):
            for i in range(len(au_interp)):
                test_cond = base_cond.clone()  # clone so the zero base condition is not mutated in place
                test_cond[d] = au_interp[i]
                test_cond = test_cond.reshape(1, self.au_dim)

                # print(test_cond.shape, self.g_enc(src_img, 1).shape)

                img_tgt = self.g_dec(self.g_enc(src_img, 1), test_cond)
                save_image(
                    denorm(img_tgt.data),
                    self.test_interp_path + '/' + self.version + '/' + str(d) +
                    '_' + str(i) + '_' + src_img_name + '.jpg')
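denorm is not defined here; if the generator uses the usual tanh output in [-1, 1], it is presumably along the lines of:

def denorm(x):
    # hypothetical: map [-1, 1] generator output back to [0, 1] for save_image
    return ((x + 1) / 2).clamp_(0, 1)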
Example #5
    def __init__(self,
                 phase='train',
                 resize=224,
                 csv_file=csv_file,
                 root_dir=training_dir):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.phase = phase
        self.resize = (resize, resize)
        self.num_classes = 196

        self.root_dir = training_dir if phase == 'train' else testing_dir
        self.name2label, _ = mapping(csv_file)
        self.transform = get_transform(self.resize, phase)

        if self.phase == 'train':
            self.label_frame = pd.read_csv(csv_file)
            self.image_path = [
                os.path.join(self.root_dir, '{:06d}.jpg'.format(x))
                for x in self.label_frame['id'].to_list()
            ]
        else:
            self.image_path = glob(self.root_dir + '/*.jpg')
Example #6
    def __init__(self, phase='train', resize=500):
        assert phase in ['train', 'val', 'test']
        self.phase = phase
        self.resize = resize

        variants_dict = {}
        with open(os.path.join(DATAPATH, 'variants.txt'), 'r') as f:
            for idx, line in enumerate(f.readlines()):
                variants_dict[line.strip()] = idx
        self.num_classes = len(variants_dict)

        if phase == 'train':
            list_path = os.path.join(DATAPATH, 'images_variant_trainval.txt')

        else:
            list_path = os.path.join(DATAPATH, 'images_variant_test.txt')

        self.images = []
        self.labels = []
        with open(list_path, 'r') as f:
            for line in f.readlines():
                fname_and_variant = line.strip()
                self.images.append(fname_and_variant[:FILENAME_LENGTH])
                self.labels.append(
                    variants_dict[fname_and_variant[FILENAME_LENGTH + 1:]])

        # transform
        self.transform = get_transform(self.resize, self.phase)
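Each line of images_variant_*.txt is '<image id> <variant>'; FGVC-Aircraft image ids are seven digits, so FILENAME_LENGTH is presumably 7. A worked example of the slicing above (the line itself is illustrative):

FILENAME_LENGTH = 7
line = '0034309 707-320'               # illustrative images_variant entry
fname = line[:FILENAME_LENGTH]         # '0034309'
variant = line[FILENAME_LENGTH + 1:]   # '707-320' (skips the separating space)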
Example #7
    def __init__(self, phase='train', resize=500):
        assert phase in ['train', 'val', 'test']
        self.phase = phase
        self.resize = resize
        self.image_id = []
        self.num_classes = 200

        # id -> path and id -> label lookups (presumably module-level dicts in the
        # original project; kept as instance attributes here so the snippet stands alone)
        self.image_path = {}
        self.image_label = {}

        # get image path from images.txt
        with open(os.path.join(DATAPATH, 'images.txt')) as f:
            for line in f.readlines():
                id, path = line.strip().split(' ')
                self.image_path[id] = path

        # get image label from image_class_labels.txt
        with open(os.path.join(DATAPATH, 'image_class_labels.txt')) as f:
            for line in f.readlines():
                id, label = line.strip().split(' ')
                self.image_label[id] = int(label)

        # get train/test image id from train_test_split.txt
        with open(os.path.join(DATAPATH, 'train_test_split.txt')) as f:
            for line in f.readlines():
                image_id, is_training_image = line.strip().split(' ')
                is_training_image = int(is_training_image)

                if self.phase == 'train' and is_training_image:
                    self.image_id.append(image_id)
                if self.phase in ('val', 'test') and not is_training_image:
                    self.image_id.append(image_id)

        # transform
        self.transform = get_transform(self.resize, self.phase)
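Only __init__ appears in this example; a __getitem__ consistent with the lookups above might be (a hypothetical sketch; the 'images' subdirectory follows the standard CUB-200-2011 layout):

    def __getitem__(self, item):
        image_id = self.image_id[item]
        path = os.path.join(DATAPATH, 'images', self.image_path[image_id])
        image = Image.open(path).convert('RGB')
        # labels in image_class_labels.txt are 1-based; shift to 0-based
        return self.transform(image), self.image_label[image_id] - 1

    def __len__(self):
        return len(self.image_id)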
Example #8
    def __init__(self, device):
        self.device = device
        self.path = PATH_CAT_ATTR_PREDICTOR
        self.model = torch.load(self.path, map_location=device).to(device)
        self.model.eval()
        self.transform = get_transform(normalize=True)
        self.tasks = ['cat', 'slv', 'neck', 'ubl', 'lbl', 'clos']
Example #9
def write_geometry(feature, ttlfile):

    code_epci = feature.GetField("CODE_EPCI")
    polygon_uri = ign_multipolygon_epci_uri.replace("$", str(code_epci))

    geom = feature.GetGeometryRef()
    geom.Transform(utils.get_transform(2154, 4326))
    geom_string = '"' + crs84_uri + ' ' + geom.ExportToWkt(
    ) + '"^^<http://www.opengis.net/ont/geosparql#wktLiteral>'

    # Write polygon
    output = polygon_uri + ' a ' + ign_multipolygon_data_type + ' ;\n'
    output += '\t' + ' <http://www.opengis.net/ont/geosparql#asWKT> ' + geom_string + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#crs> ' + '<http://data.ign.fr/id/ignf/crs/WGS84GDD>' + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#centroid> ' + ign_centroid_epci_uri.replace(
        "$", str(code_epci)) + ' .\n'
    output += '\n'

    # Write centroid
    centroid = geom.Centroid()
    wkt = 'POINT(' + str(centroid.GetX()) + ' ' + str(centroid.GetY()) + ')'
    geom_string = '"' + crs84_uri + ' ' + wkt + '"^^<http://www.opengis.net/ont/geosparql#wktLiteral>'

    output += ign_centroid_epci_uri.replace(
        "$", str(code_epci)) + ' a ' + ign_point_data_type + ' ;\n'
    output += '\t' + ' <http://www.opengis.net/ont/geosparql#asWKT> ' + geom_string + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#crs> ' + '<http://data.ign.fr/id/ignf/crs/WGS84GDD>' + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#coordX> "' + str(
        centroid.GetX()) + '"^^xsd:double ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#coordY> "' + str(
        centroid.GetY()) + '"^^xsd:double .\n'
    output += '\n'

    ttlfile.write(output)
Example #10
def store_chef_lieu(feature):
    statut_chef_lieu = feature.GetFieldAsString("STATUT")
    insee_commune = feature.GetFieldAsString("INSEE_COM")
    geom = feature.GetGeometryRef()
    geom.Transform(utils.get_transform(2154, 4326))
    geom_string = str(geom.GetX()) + '|' + str(geom.GetY())
    if statut_chef_lieu == "Commune simple":
        chefs_lieu_commune.update({insee_commune: geom_string})
    elif statut_chef_lieu == "Arrondissement municipal":
        chefs_lieu_commune.update({insee_commune: geom_string})
    elif statut_chef_lieu == "Sous-préfecture":
        chefs_lieu_arrdt.update({insee_commune: geom_string})
        chefs_lieu_commune.update({insee_commune: geom_string})
    elif statut_chef_lieu == "Préfecture":
        chefs_lieu_dept.update({insee_commune: geom_string})
        chefs_lieu_commune.update({insee_commune: geom_string})
    elif statut_chef_lieu == "Préfecture de région":
        chefs_lieu_region.update({insee_commune: geom_string})
        chefs_lieu_dept.update({insee_commune: geom_string})
        chefs_lieu_commune.update({insee_commune: geom_string})
    elif statut_chef_lieu == "Capitale d'état":
        chefs_lieu_capitale.update({insee_commune: geom_string})
        chefs_lieu_arrdt.update({insee_commune: geom_string})
        chefs_lieu_dept.update({insee_commune: geom_string})
        chefs_lieu_commune.update({insee_commune: geom_string})
Example #11
    def sample(self, image, question, topk=5):
        """ Processes a question and image, passes it through the trained net and returns an answer """
        question = question.lower().replace("?", "")
        question = question.split(' ')
        q, q_len = self.encode_question(question)

        transform = utils.get_transform(config.image_size,
                                        config.central_fraction)
        inputImg = transform(image)

        with torch.no_grad():
            q = Variable(q.unsqueeze(0))
            q_len = Variable(torch.tensor([q_len]))
            v = Variable(self.resnet(inputImg.unsqueeze(0)))

            out = self.net(v, q, q_len)

            out = self.softmax(out)  # to get confidence

            answer = out.data.topk(topk, dim=1)  # top k number of answers

            answers = []

            for i in range(topk):
                answers.append((self.answers[int(answer[1][0][i])],
                                float(answer[0][0][i])))

        return answers
Example #12
def get_loader(image_path, train=False, val=False, test=False):
    """ Returns a data loader for the desired split """
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    split = VQA(utils.path_for(train=train, val=val, test=test, question=True),
                utils.path_for(train=train, val=val, test=test, answer=True),
                image_path,
                answerable_only=train,
                transform=utils.get_transform(config.image_size, test,
                                              config.central_fraction))
    if test:
        loader = torch.utils.data.DataLoader(
            split,
            batch_size=config.test_batch_size,
            shuffle=train,  # only shuffle the data in training
            pin_memory=True,
            num_workers=config.data_workers,
            collate_fn=collate_fn,
        )
    else:
        loader = torch.utils.data.DataLoader(
            split,
            batch_size=config.batch_size,
            shuffle=train,  # only shuffle the data in training
            pin_memory=True,
            num_workers=config.data_workers,
            collate_fn=collate_fn,
        )
    return loader
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', '-bs', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--learning-rate', '-lr', type=float, default=1e-3)
    parser.add_argument('--layers', '-l', type=int, default=20)
    parser.add_argument('--channels', '-ch', type=int, default=64)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--model-dir', type=str, default='models')
    parser.add_argument('--parallel', action='store_true')
    parser.add_argument('--workers', type=int, default=0)
    parser.add_argument('--log-interval', type=int, default=1000)
    parser.add_argument('--crop-size', type=int, default=64)
    args = parser.parse_args()

    args.cuda = args.cuda and torch.cuda.is_available()
    os.makedirs(args.model_dir, exist_ok=True)

    print(args)

    net = DeepClassAwareDenoiseNet(3, args.channels, args.layers)

    dataloader = data.DataLoader(NoisyCoco(root='data/train2017',
                                           transform=get_transform(),
                                           crop_size=args.crop_size),
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.workers)

    solver = Solver(args, net, dataloader)
    solver.solve()
Example #14
    def loadImage(self, idx):
        ds = self.ds

        ## load + crop
        orig_img = ds.get_img(idx)
        orig_keypoints = ds.get_kps(idx)
        kptmp = orig_keypoints.copy()
        c = ds.get_center(idx)
        s = ds.get_scale(idx)

        cropped = utils.crop(orig_img, c, s, (self.input_res, self.input_res))
        for i in range(np.shape(orig_keypoints)[1]):
            if orig_keypoints[0, i, 0] > 0:
                orig_keypoints[0, i, :2] = utils.transform(orig_keypoints[0, i, :2], c, s, (self.input_res, self.input_res))
        keypoints = np.copy(orig_keypoints)

        ## augmentation -- to be done to cropped image
        height, width = cropped.shape[0:2]
        center = np.array((width / 2, height / 2))
        scale = max(height, width) / 200
        aug_rot = (np.random.random() * 2 - 1) * 30.
        aug_scale = np.random.random() * (1.25 - 0.75) + 0.75
        scale *= aug_scale
        mat_mask = utils.get_transform(center, scale, (self.output_res, self.output_res), aug_rot)[:2]
        mat = utils.get_transform(center, scale, (self.input_res, self.input_res), aug_rot)[:2]
        inp = cv2.warpAffine(cropped, mat, (self.input_res, self.input_res)).astype(np.float32) / 255
        keypoints[:, :, 0:2] = utils.kpt_affine(keypoints[:, :, 0:2], mat_mask)
        if np.random.randint(2) == 0:
            inp = self.preprocess(inp)
            inp = inp[:, ::-1]
            keypoints = keypoints[:, ds.flipped_parts['mpii']]
            keypoints[:, :, 0] = self.output_res - keypoints[:, :, 0]
            orig_keypoints = orig_keypoints[:, ds.flipped_parts['mpii']]
            orig_keypoints[:, :, 0] = self.input_res - orig_keypoints[:, :, 0]

        ## zero out keypoints that were not initially visible (so their heatmaps are all zeros)
        for i in range(np.shape(orig_keypoints)[1]):
            if kptmp[0, i, 0] == 0 and kptmp[0, i, 1] == 0:
                keypoints[0, i, 0] = 0
                keypoints[0, i, 1] = 0
                orig_keypoints[0, i, 0] = 0
                orig_keypoints[0, i, 1] = 0

        ## generate heatmaps on outres
        heatmaps = self.generateHeatmap(keypoints)
        # inp.transpose
        return torch.tensor(inp.astype(np.float32)).permute(2, 0, 1), torch.tensor(heatmaps.astype(np.float32))
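For reference, get_transform here returns a 3x3 affine (hence the [:2] slicing above) that maps original-image coordinates into a res-sized crop around center; a sketch following the common stacked-hourglass-style implementation:

def get_transform(center, scale, res, rot=0):
    # scale is person height / 200, so h is the person's extent in pixels
    h = 200 * scale
    t = np.zeros((3, 3))
    t[0, 0] = float(res[1]) / h
    t[1, 1] = float(res[0]) / h
    t[0, 2] = res[1] * (-float(center[0]) / h + .5)
    t[1, 2] = res[0] * (-float(center[1]) / h + .5)
    t[2, 2] = 1
    if rot != 0:
        rot_rad = -rot * np.pi / 180  # sign flipped to match the cropping convention
        sn, cs = np.sin(rot_rad), np.cos(rot_rad)
        rot_mat = np.eye(3)
        rot_mat[0, :2] = [cs, -sn]
        rot_mat[1, :2] = [sn, cs]
        # rotate about the centre of the output crop
        t_mat = np.eye(3)
        t_mat[0, 2] = -res[1] / 2
        t_mat[1, 2] = -res[0] / 2
        t_inv = t_mat.copy()
        t_inv[:2, 2] *= -1
        t = t_inv.dot(rot_mat).dot(t_mat).dot(t)
    return t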
Example #15
    def __init__(self,
                 questions_path1,
                 questions_path2,
                 answers_path1,
                 answers_path2,
                 image_path,
                 fdict_path,
                 answerable_only=False):
        super(VQA, self).__init__()
        with open(questions_path1, 'r') as fd:
            questions_json1 = json.load(fd)
        with open(answers_path1, 'r') as fd:
            answers_json1 = json.load(fd)
        with open(questions_path2, 'r') as fd:
            questions_json2 = json.load(fd)
        with open(answers_path2, 'r') as fd:
            answers_json2 = json.load(fd)
        with open(config.vocabulary_path, 'r') as fd:
            vocab_json = json.load(fd)
        self._check_integrity(questions_json1, answers_json1)
        self._check_integrity(questions_json2, answers_json2)

        # vocab
        self.vocab = vocab_json
        self.token_to_index = self.vocab['question']
        self.answer_to_index = self.vocab['answer']

        # q and a
        self.questions1 = list(prepare_questions(questions_json1))
        self.answers1 = list(prepare_answers(answers_json1))
        self.questions1 = [self._encode_question(q) for q in self.questions1]
        self.answers1 = [self._encode_answers(a) for a in self.answers1]

        self.questions2 = list(prepare_questions(questions_json2))
        self.answers2 = list(prepare_answers(answers_json2))
        self.questions2 = [self._encode_question(q) for q in self.questions2]
        self.answers2 = [self._encode_answers(a) for a in self.answers2]

        # v
        self.image_path = image_path
        #self.coco_id_to_index = self._create_coco_id_to_index()
        self.coco_ids1 = [q['image_id'] for q in questions_json1['questions']]
        self.coco_ids2 = [q['image_id'] for q in questions_json2['questions']]
        self.transform = utils.get_transform(config.image_size,
                                             config.central_fraction)

        #self.id_to_filename = self._find_images()
        self.id_to_filename = np.load(fdict_path, allow_pickle=True).item()  # pickled dict; allow_pickle is required on NumPy >= 1.16.3
        self.sorted_ids = sorted(self.id_to_filename.keys()
                                 )  # used for deterministic iteration order
        print('found {} images in {}'.format(len(self.id_to_filename),
                                             image_path))
        #pdb.set_trace()

        # only use questions that have at least one answer?
        self.answerable_only = answerable_only
        if self.answerable_only:
            self.answerable = self._find_answerable()
Example #16
    def box_contains_bubbles(self, box, threshold):
        im = utils.get_transform(box, threshold)
        contours, _ = cv.findContours(im, cv.RETR_EXTERNAL,
                                      cv.CHAIN_APPROX_SIMPLE)

        for contour in contours:
            if self.is_bubble(contour):
                return True

        return False
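is_bubble is not shown; a hypothetical circularity test (the thresholds are made-up defaults, and numpy is assumed to be available as np) might look like:

    def is_bubble(self, contour, min_area=100, min_circularity=0.7):
        # circularity = 4*pi*area / perimeter^2, which is 1.0 for a perfect circle
        area = cv.contourArea(contour)
        perimeter = cv.arcLength(contour, True)
        if area < min_area or perimeter == 0:
            return False
        return 4 * np.pi * area / perimeter ** 2 >= min_circularity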
Example #17
    def load_image(self, path, show=False):
        transform = utils.get_transform(config.image_size,
                                        config.central_fraction)
        img = Image.open(path).convert('RGB')
        if show:
            plt.figure()
            plt.imshow(img)
        if transform is not None:
            img = transform(img)
        return img
Example #18
def main(args):
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    print("load dataset")
    num_classes = 2
    data = HandDataset(args.data_path, utils.get_transform(train=True))

    indices = torch.randperm(len(data)).tolist()
    test_cnt = int(len(data) / 10)
    dataset = torch.utils.data.Subset(data, indices[:-test_cnt])
    dataset_test = torch.utils.data.Subset(data, indices[-test_cnt:])  # subset the full dataset, not the train subset

    # training and validation data loaders
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=2,
        shuffle=True,
        num_workers=4,
        collate_fn=lambda x: tuple(zip(*x)))

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=1,
        shuffle=False,
        num_workers=4,
        collate_fn=lambda x: tuple(zip(*x)))

    print("load model")
    model = MaskRcnn.get_pretrained_resnet50_model(num_classes)

    model.to(device)

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)
    print("begin train")
    num_epochs = 10

    for epoch in range(num_epochs):
        utils.train_one_epoch(model,
                              optimizer,
                              data_loader,
                              device,
                              epoch,
                              print_freq=10)
        lr_scheduler.step()  # step the schedule once per epoch, inside the loop
        # evaluate(model, data_loader_test, device=device)

    print("That's it!")
Example #19
def main():
    global img
    parser = argparse.ArgumentParser()
    parser.add_argument('input_dir', help='path to image.')
    # parser.add_argument('question', help='question about image')
    args = parser.parse_args()

    q = input('Please input the question: ')

    # Image preprocess
    img = Image.open(args.input_dir).convert('RGB')
    transform = utils.get_transform(config.image_size, config.central_fraction)
    v = transform(img)

    net = ImageProcessor()
    net.eval()

    v = v.unsqueeze(dim=0)
    with torch.no_grad():
        v = net(v)

    # Question preprocess: lowercase, strip the trailing '?', tokenize on whitespace
    q = q.lower()[:-1]
    q = q.split(' ')
    q_len = torch.tensor([len(q)], dtype=torch.long)

    max_question_length = 23
    with open(config.vocabulary_path, 'r') as fd:
        vocab_json = json.load(fd)

    vec = torch.zeros(max_question_length).long()

    token_to_index = vocab_json['question']

    for i, token in enumerate(q):
        index = token_to_index.get(token, 0)
        vec[i] = index

    vec = vec.unsqueeze(dim=0)

    num_tokens = len(token_to_index) + 1

    log = torch.load('2017-08-04_00.55.19.pth', map_location='cpu')
    net = torch.nn.DataParallel(Net(num_tokens))
    net.load_state_dict(log['weights'])
    net.eval()
    net.module.apply_attention.register_forward_hook(attn_hook_function)
    with torch.no_grad():
        out = net(v, vec, q_len)
    conf, ans = out.topk(k=5, dim=1)
    conf, ans = conf.tolist()[0], ans.tolist()[0]

    print('TOP 5 PREDICT ANSWER:')
    for c, a in zip(conf, ans):
        print(get_key(a, vocab_json['answer']), c)
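get_key is not shown; since vocab_json['answer'] maps answer strings to indices, it is presumably a reverse lookup along these lines:

def get_key(value, dictionary):
    # return the first key whose value matches (index -> answer string)
    for key, val in dictionary.items():
        if val == value:
            return key
    return None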
Example #20
def get_dataloader(config):
    transform_img_train, transform_label_train = get_transform(config,
                                                               is_train=True)
    transform_img_val, transform_label_val = get_transform(config,
                                                           is_train=False)
    train_set = Dataset(config['root'] + '/train', config['size'],
                        transform_img_train, transform_label_train)
    val_set = Dataset(config['root'] + '/validate', config['size'],
                      transform_img_val, transform_label_val)
    train_loader = DataLoader(train_set,
                              batch_size=config['batch_size'],
                              shuffle=True,
                              num_workers=0,
                              drop_last=False)
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            shuffle=False,
                            num_workers=0,
                            drop_last=False)
    return train_loader, val_loader
Example #21
def create_coco_loader(path):
    transform = utils.get_transform(config.image_size, config.central_fraction)
    dataset = CocoImages(path, transform=transform)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config.preprocess_batch_size,
        num_workers=config.data_workers,
        shuffle=False,
        pin_memory=True,
    )
    return data_loader
Example #22
def create_vizwiz_loader(*paths):
    transform = utils.get_transform(config.image_size, config.central_fraction)
    datasets = [data.VizWizImages(path, transform=transform) for path in paths]
    dataset = data.Composite(*datasets)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config.preprocess_batch_size,
        num_workers=config.data_workers,
        shuffle=False,
        pin_memory=True,
    )
    return data_loader
Example #23
def main():
    model = Generator(N_RESIDUAL_BLOCKS, USE_DROPOUT)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
    model.eval()
    transform = get_transform(IMAGE_SIZE)
    validation_data = torchvision.datasets.ImageFolder(
        root=VALIDATION_DATA_PATH, transform=transform)

    with torch.no_grad():  # inference only; no autograd graphs needed
        for i in range(N_VALIDATION_IMAGES):
            prediction = model(validation_data[i][0].unsqueeze(0))
            save_image(prediction, f'prediction{i}.png', normalize=True)
Example #24
def write_geometry(feature, ttlfile):

    insee_arr = feature.GetField("INSEE_ARR")
    insee_dept = feature.GetField("INSEE_DEP")
    unique_id = insee_dept + insee_arr

    # Write polygon
    geom = feature.GetGeometryRef()
    geom.Transform(utils.get_transform(2154, 4326))
    geom_string = '"' + crs84_uri + ' ' + geom.ExportToWkt(
    ) + '"^^<http://www.opengis.net/ont/geosparql#wktLiteral>'
    polygon_uri = ign_multipolygon_arrondissement_uri.replace("$", unique_id)

    output = polygon_uri + ' a ' + ign_multipolygon_data_type + ' ;\n'
    output += '\t' + ' <http://www.opengis.net/ont/geosparql#asWKT> ' + geom_string + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#crs> ' + '<http://data.ign.fr/id/ignf/crs/WGS84GDD>' + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#centroid> ' + ign_centroid_arrondissement_uri.replace(
        "$", unique_id) + ' .\n'
    output += '\n'

    # Write centroid
    centroid = geom.Centroid()
    wkt = 'POINT(' + str(centroid.GetX()) + ' ' + str(centroid.GetY()) + ')'
    geom_string = '"' + crs84_uri + ' ' + wkt + '"^^<http://www.opengis.net/ont/geosparql#wktLiteral>'

    output += ign_centroid_arrondissement_uri.replace(
        "$", unique_id) + ' a ' + ign_point_data_type + ' ;\n'
    output += '\t' + ' <http://www.opengis.net/ont/geosparql#asWKT> ' + geom_string + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#crs> ' + '<http://data.ign.fr/id/ignf/crs/WGS84GDD>' + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#coordX> "' + str(
        centroid.GetX()) + '"^^xsd:double ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#coordY> "' + str(
        centroid.GetY()) + '"^^xsd:double .\n'
    output += '\n'

    # Write chef lieu
    insee_commune = chef_lieu.get_cl_arrdt_commune(insee_dept)
    if insee_commune is not None:
        lon, lat, wkt = chef_lieu.get_cl_arrdt_coords_and_wkt(insee_commune)
        geom_string = '"' + crs84_uri + ' ' + wkt + '"^^<http://www.opengis.net/ont/geosparql#wktLiteral>'

        output += ign_cheflieu_arrondissement_uri.replace(
            "$", insee_commune) + ' a ' + ign_point_data_type + ' ;\n'
        output += '\t' + ' <http://www.opengis.net/ont/geosparql#asWKT> ' + geom_string + ' ;\n'
        output += '\t' + ' <http://data.ign.fr/def/geometrie#crs> ' + '<http://data.ign.fr/id/ignf/crs/WGS84GDD>' + ' ;\n'
        output += '\t' + ' <http://data.ign.fr/def/geometrie#coordX> "' + str(
            lon) + '"^^xsd:double ;\n'
        output += '\t' + ' <http://data.ign.fr/def/geometrie#coordY> "' + str(
            lat) + '"^^xsd:double .\n'
        output += '\n'

    ttlfile.write(output)
Example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-file',
                        '-i',
                        type=str,
                        default=None,
                        help='Path to an input file.')
    parser.add_argument(
        '--model',
        '-m',
        type=str,
        choices=['resnet18Fer2013', 'resnet18KDEF', 'resnet18Dartmouth'],
        default='resnet18Fer2013',
        help='Model name.')
    args = parser.parse_args()
    input_file_name = args.input_file
    model_name = args.model
    img = Image.open(input_file_name)
    if model_name == 'resnet18Fer2013':
        net = resnet18()
        weights = 'resnet18Fer2013.pth'
        transform = get_transform(48)
    elif model_name == 'resnet18KDEF':
        net = resnet18()
        weights = 'resnet18KDEF.pth'
        transform = get_transform(224)
    elif model_name == 'resnet18Dartmouth':
        net = resnet18()
        weights = 'resnet18Dartmouth.pth'
        transform = get_transform(224)
    path = Path.joinpath(Path(), 'weights', weights)
    net.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
    net.eval()
    img_tensor = transform(img).unsqueeze(0)
    with torch.no_grad():
        softmax = torch.nn.Softmax(dim=1)
        preds = softmax(net(img_tensor)).numpy().ravel()
        for i in range(len(CLASSES)):
            print('{:>8}: {:5.2f}%'.format(CLASSES[i], 100 * preds[i]))
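get_transform is not shown in this example; given that it is called with a bare side length (48 or 224), a minimal torchvision-style sketch consistent with those call sites might be (the normalization statistics are an assumption):

from torchvision import transforms

def get_transform(size):
    # hypothetical: resize to a square, convert to tensor, normalize with ImageNet stats
    return transforms.Compose([
        transforms.Resize((size, size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])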
Example #26
def predict_fn(input_object, model):

    print('Inferring class of input data.')

    tfms = get_transform()

    input_data = tfms(input_object)

    device = get_default_device()  # fetched but unused here; predict_image presumably handles device placement

    output = predict_image(input_data, model)

    return output
Example #27
def create_coco_loader(*paths):
    transform = utils.get_transform(config.image_size, config.central_fraction)
    datasets = [data.CocoImages(path, transform=transform) for path in paths]
    #ipdb.set_trace()  ## datasets[0].__getitem__(116591)[0]  print the largest coco_id - this is within torch int64 bound!
    dataset = data.Composite(*datasets)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config.preprocess_batch_size,
        num_workers=config.data_workers,
        shuffle=False,
        pin_memory=True,
    )
    return data_loader
Example #28
def main():
    args = parse_arguments()

    # dataset, dataloader
    transforms = get_transform()
    train_dataset = Dataset.TrackData_RL(args.train_data, transform=transforms)
    train_loader = DataLoader(train_dataset,
                              num_workers=args.num_workers,
                              shuffle=True,
                              batch_size=1)

    # model, environment
    R = Reinforce(train_loader, transforms)
    env = Env(args)

    start_epoch = 1

    if args.init_sl:
        if os.path.isfile(args.init_sl):
            print("=> loading checkpoint '{}'".format(args.init_sl))
            checkpoint = torch.load(args.init_sl)
            R.agent.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.init_sl))
    elif args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            R.agent.load_state_dict(checkpoint['state_dict'])
            R.optim.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    for epoch in range(start_epoch, args.max_epochs + 1):
        R.train(env, epoch, args.gamma, logging=True)
        if epoch % args.save_freq == 0:
            # save model
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': R.agent.state_dict(),
                    'optimizer': R.optim.state_dict(),
                },
                dir='cv/%s/' % args.name)
Example #29
def write_geometry(feature, ttlfile):

    insee_region = feature.GetField("INSEE_REG")
    polygon_uri = ign_multipolygon_region_uri.replace("$", insee_region)

    geom = feature.GetGeometryRef()
    geom.Transform(utils.get_transform(2154, 4326))
    geom_string = '"' + crs84_uri + ' ' + geom.ExportToWkt(
    ) + '"^^<http://www.opengis.net/ont/geosparql#wktLiteral>'

    output = polygon_uri + ' a ' + ign_multipolygon_data_type + ' ;\n'
    output += '\t' + ' <http://www.opengis.net/ont/geosparql#asWKT> ' + geom_string + ' ;\n'
    output += '\t' + ' <http://data.ign.fr/def/geometrie#crs> ' + '<http://data.ign.fr/id/ignf/crs/WGS84GDD>' + ' .\n'
    output += '\n'

    ttlfile.write(output)
Example #30
def get_train_data():
    normal_train, normal_test = get_sentence(args.train_data, args.test_data)
    transfer_train, transfer_test = get_sentence(args.transfer_train_data, args.transfer_test_data)
    char2id, id2char, tag2id, id2tag, transfer_tag2id, transfer_id2tag = get_transform(normal_train + transfer_train,
                                                                                       args.map_path,
                                                                                       args.tag2label_path,
                                                                                       args.transfer_tag2label_path)
    train_data = preprocess_data(normal_train, char2id, tag2id)
    train_manager = BatchManager(train_data, args.batch_size)
    test_data = preprocess_data(normal_test, char2id, tag2id)
    test_manager = BatchManager(test_data, args.batch_size)
    transfer_train_data = preprocess_data(transfer_train, char2id, transfer_tag2id)
    transfer_train_manager = BatchManager(transfer_train_data, args.batch_size)
    transfer_test_data = preprocess_data(transfer_test, char2id, transfer_tag2id)
    transfer_test_manager = BatchManager(transfer_test_data, args.batch_size)

    return train_manager, test_manager, transfer_train_manager, transfer_test_manager, id2char, id2tag, transfer_id2tag