Example #1
def task5b(query, top, visualize=False, combine_models=False):
    constants = GlobalConstants()
    lsh = Model().load_model(constants.LSH_OBJECT)
    imageids, feat_vectors, query_vector = lsh.query(query, top)
    print(imageids[:top])
    if visualize:
        result = []
        for rank, image in enumerate(imageids[:top]):
            res = {
                'path': os.path.join("Hands", image),
                'imageId': image,
                'rank': rank + 1
            }
            result.append(res)
        if combine_models:
            extract = "HOG + CM"
        else:
            extract = "HOG"
        title = {
            "Search": "Locality Sensitive Hashing (LSH)",
            "Feature Extraction": extract,
            "L": lsh.get_l(),
            "K": lsh.get_k(),
            "Dimensionality Reduction": "NMF",
            "t": 20,
            "Distance": "Euclidean"
        }
        print(os.path.abspath(os.path.join("Hands", query)))
        show_images(os.path.abspath(os.path.join("Hands", query)),
                    result,
                    title,
                    rank=True)

    return imageids, feat_vectors, query_vector
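A call to this helper might look like the following; the query file name is purely illustrative and not taken from the example above:

imageids, feat_vectors, query_vector = task5b('Hand_0000002.jpg', 10, visualize=True)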
Example #2
def main(exp_const, data_const, model_const):
    print('Creating network ...')
    model = Model()
    model.const = model_const
    model.encoder = Encoder(model.const.encoder).cuda()
    encoder_path = os.path.join(exp_const.model_dir,
                                'encoder_' + str(model.const.model_num))
    model.encoder.load_state_dict(torch.load(encoder_path))

    print('Creating dataloader ...')
    dataset = VisualFeaturesDataset(data_const)
    dataloader = DataLoader(dataset,
                            batch_size=exp_const.batch_size,
                            shuffle=True)

    print('Get features ...')
    features = get_visual_features(model, dataloader, exp_const)

    print('Save features h5py ...')
    word_features_h5py = h5py.File(
        os.path.join(exp_const.exp_dir, 'word_features.h5py'), 'w')
    word_features_h5py.create_dataset('features',
                                      data=features,
                                      chunks=(1, features.shape[1]))
    word_features_h5py.create_dataset('mean', data=np.mean(features, axis=0))
    word_features_h5py.close()

    print('Save features word idx json ...')
    word_to_idx_json = os.path.join(exp_const.exp_dir, 'word_to_idx.json')
    io.dump_json_object(dataloader.dataset.word_to_idx, word_to_idx_json)
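The features written above can later be read back with h5py; a minimal sketch assuming the file layout produced by this example (the directory name is a placeholder):

import os
import h5py

word_features_h5py = h5py.File(
    os.path.join('exp_dir', 'word_features.h5py'), 'r')  # placeholder directory
features = word_features_h5py['features'][()]  # (num_items, feat_dim) array
mean = word_features_h5py['mean'][()]          # per-dimension mean
word_features_h5py.close()
print(features.shape, mean.shape)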
Example #3
def execute_demo(language):
    if language == 'english':
        word_emb = load_word_embeddings('english')
    elif language == 'spanish':
        word_emb = load_word_embeddings('spanish')
    else:
        raise ValueError('Unsupported language: {}'.format(language))

    data = Dataset(language)

    print("{}: {} training - {} dev".format(language, len(data.trainset),
                                            len(data.devset)))

    #for sent in data.trainset:
    # Gold label -> 0 if the word is not complex, 1 if the word is complex.
    #print(sent['sentence'], sent['target_word'], sent['gold_label'])

    baseline = Baseline(language)

    model = Model(language)

    model.train(data.trainset, word_emb)

    predictions = model.test(data.devset, word_emb)

    gold_labels = [sent['gold_label'] for sent in data.devset]

    report_score(gold_labels, predictions)
Example #4
def main(exp_const, data_const, model_const):
    io.mkdir_if_not_exists(exp_const.exp_dir, recursive=True)
    io.mkdir_if_not_exists(exp_const.log_dir)
    io.mkdir_if_not_exists(exp_const.model_dir)
    configure(exp_const.log_dir)
    save_constants({
        'exp': exp_const,
        'data': data_const,
        'model': model_const},
        exp_const.exp_dir)

    print('Creating network ...')
    model = Model()
    model.const = model_const
    model.encoder = Encoder(model.const.encoder).cuda()
    model.decoder = Decoder(model.const.decoder).cuda()

    encoder_path = os.path.join(
        exp_const.model_dir,
        f'encoder_{-1}')
    torch.save(model.encoder.state_dict(), encoder_path)

    decoder_path = os.path.join(
        exp_const.model_dir,
        f'decoder_{-1}')
    torch.save(model.decoder.state_dict(), decoder_path)

    print('Creating dataloader ...')
    dataset = VisualFeaturesDataset(data_const)
    dataloader = DataLoader(
        dataset,
        batch_size=exp_const.batch_size,
        shuffle=True)

    train_model(model, dataloader, exp_const)
Example #5
def save_model_file():
    constants = GlobalConstants()
    results = MongoWrapper(constants.Mongo().DB_NAME).find(
        constants.CM.lower(), {}, {
            "_id": 0,
            "featureVector": 1
        })
    featurearray = np.array([x['featureVector'] for x in results])
    model = Model()
    model.save_model(featurearray, 'cm_np')
Example #6
def get_x_train(fea_ext_mod, dim_red_mod, k_value, train_set):
    model_interact = Model()
    dim_reduction = DimensionReduction(fea_ext_mod,
                                       dim_red_mod,
                                       k_value,
                                       folder_metadata=train_set,
                                       metadata_collection="labelled")
    obj_lat, feat_lat, model = dim_reduction.execute()
    filename = "{0}_{1}_{2}_{3}".format(fea_ext_mod, dim_red_mod, str(k_value),
                                        os.path.basename(train_set))
    model_interact.save_model(model=model, filename=filename)
    return obj_lat
Example #7
def dimension_reduction():
    # save_model_file()
    constants = GlobalConstants()
    model = Model()
    features = model.load_model('cm_np')
    redn = DimensionReduction(dimension_reduction_model=constants.PCA,
                              extractor_model=constants.CM,
                              matrix=features,
                              conversion=True,
                              k_value=500)
    redn.execute()
Example #8
def main(exp_const, data_const, model_const):
    io.mkdir_if_not_exists(exp_const.exp_dir, recursive=True)
    io.mkdir_if_not_exists(exp_const.log_dir)
    io.mkdir_if_not_exists(exp_const.model_dir)
    io.mkdir_if_not_exists(exp_const.vis_dir)
    configure(exp_const.log_dir)
    save_constants({
        'exp': exp_const,
        'data': data_const,
        'model': model_const
    }, exp_const.exp_dir)

    print('Creating network ...')
    model = Model()
    model.const = model_const
    model.net = NET(model.const.net)
    if model.const.model_num is not None:
        model.net.load_state_dict(torch.load(model.const.net_path))
    model.net.cuda()
    model.img_mean = np.array([0.485, 0.456, 0.406])
    model.img_std = np.array([0.229, 0.224, 0.225])
    model.to_file(os.path.join(exp_const.exp_dir, 'model.txt'))

    print('Creating dataloader ...')
    dataloaders = {}
    for mode, subset in exp_const.subset.items():
        data_const = copy.deepcopy(data_const)
        data_const.subset = subset
        dataset = DATASET(data_const)
        dataloaders[mode] = DataLoader(dataset,
                                       batch_size=exp_const.batch_size,
                                       shuffle=True,
                                       num_workers=exp_const.num_workers)

    train_model(model, dataloaders, exp_const)
Example #9
def main(exp_const, data_const, model_const):
    print('Loading model ...')
    model = Model()
    model.const = model_const
    model.hoi_classifier = HoiClassifier(model.const.hoi_classifier).cuda()
    if model.const.model_num == -1:
        print(
            'No pretrained model will be loaded since model_num is set to -1')
    else:
        model.hoi_classifier.load_state_dict(
            torch.load(model.const.hoi_classifier.model_pth))

    print('Creating data loader ...')
    dataset = Features(data_const)

    eval_model(model, dataset, exp_const)
Example #10
def get_x_test(fea_ext_mod, dim_red_mod, k_value, train_set, test_set):
    model_interact = Model()
    dim_reduction = DimensionReduction(fea_ext_mod, dim_red_mod, k_value)
    filename = "{0}_{1}_{2}_{3}".format(fea_ext_mod, dim_red_mod, str(k_value),
                                        os.path.basename(train_set))
    model = model_interact.load_model(filename=filename)
    red_dims = []
    unlabelled_image_list = os.listdir(test_set)
    for image in unlabelled_image_list:
        red_dim = dim_reduction.compute_query_image(model, test_set, image)
        red_dims.append(red_dim[0])
    df = pd.DataFrame({
        "imageId": unlabelled_image_list,
        "reducedDimensions": red_dims
    })
    return df
Example #11
def main(exp_const, data_const, model_const):
    print('Creating network ...')
    model = Model()
    model.const = model_const
    model.net = LogBilinear(model.const.net)
    if model.const.model_num is not None:
        model.net.load_state_dict(torch.load(model.const.net_path))
    
    embeddings = 0.5 * (model.net.embed1.W.weight + model.net.embed2.W.weight)
    embeddings = embeddings.data.numpy()
    embeddings_json = os.path.join(exp_const.exp_dir, 'visual_embeddings.npy')
    np.save(embeddings_json, embeddings)

    print('Saving word_to_idx.json ...')
    dataset = MultiSenseCooccurDataset(data_const)
    word_to_idx = dataset.word_to_idx
    word_to_idx_json = os.path.join(exp_const.exp_dir, 'word_to_idx.json')
    io.dump_json_object(word_to_idx, word_to_idx_json)
Example #12
    def nmf(self):
        """
        Performs NMF dimensionality reduction
        :return:
        """
        constants = self.constants.Nmf()
        if self.binary_image_metadata:
            data = self.get_binary_image_metadata_matrix()
        elif self.subject_subject:
            data = self.matrix
        else:
            if not self.matrix:
                data = self.get_object_feature_matrix()
            else:
                data = self.matrix

        if data.size != 0:
            obj_feature = np.array(data['featureVector'].tolist())
            if (obj_feature < 0).any():
                print("NMF does not accept negative values")
                return

            model = NMF(n_components=self.k_value,
                        beta_loss=constants.BETA_LOSS_KL,
                        init=constants.INIT_MATRIX,
                        random_state=0,
                        solver='mu',
                        max_iter=1000)
            w = model.fit_transform(obj_feature)
            h = model.components_
            if self.save_model:
                model = Model()
                model.save_model(
                    w,
                    "{}_{}_w".format(self.extractor_model.lower(),
                                     self.dimension_reduction_model.lower()))
                model.save_model(
                    h,
                    "{}_{}_h".format(self.extractor_model.lower(),
                                     self.dimension_reduction_model.lower()))
                return self

            if not self.conversion:
                return w, h
            tt1 = time.time()
            data_lat = pd.DataFrame({
                "imageId": data['imageId'],
                "reducedDimensions": w.tolist()
            })
            # for i in range(h.shape[0]):
            #     print("Latent Feature: {}\n{}".format(i + 1, sorted(((i, v) for i, v in enumerate(h[i])),
            #                                                         key=lambda x: x[1], reverse=True)))

            # print("\n\nTime Taken for NMF {}\n".format(time.time() - tt1))
            return data_lat, h, model
        raise Exception(
            "Data in database is empty. Run Task 2 of Phase 1 "
            "(insert feature-extracted records into the database).")
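For reference, the fit_transform / components_ pattern used by the method above can be reproduced in isolation with scikit-learn; a minimal sketch on random non-negative data, where the component count and solver settings are illustrative rather than this project's constants:

import numpy as np
from sklearn.decomposition import NMF

X = np.abs(np.random.rand(100, 64))  # NMF requires non-negative input
nmf = NMF(n_components=10, init='nndsvda', solver='mu',
          beta_loss='kullback-leibler', max_iter=1000, random_state=0)
w = nmf.fit_transform(X)   # (100, 10) reduced representation
h = nmf.components_        # (10, 64) latent feature basis
print(w.shape, h.shape)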
Example #13
def get_probability_revelance_feedback(query_id, no_images):
    imageids, feat_vectors, query_vector = task5.task5b(query_id, no_images)
    model = Model()
    bin_matrix = get_binary_matrix(feat_vectors)
    imageid_index, feature_index = preprocess(feat_vectors)
    model.save_model(imageids, 'imageids')
    model.save_model(feat_vectors, 'feat_vectors')
    model.save_model(bin_matrix, 'bin_matrix')
    model.save_model(imageid_index, 'imageid_index')
    model.save_model(feature_index, 'feature_index')
    relevancefeedback.relevance_fdbk("PROBABILITY",
                                     query_id,
                                     get_relevance(feat_vectors.keys(),
                                                   bin_matrix, imageid_index,
                                                   feature_index,
                                                   feat_vectors),
                                     feat_vectors,
                                     query_image_vector=query_vector)
Example #14
def evaluate_lookup_table(lookup_table, prior_pool, CONFIG, evaluate_nums=10):
    for i in range(evaluate_nums):
        gen_mac, arch_param = prior_pool.generate_arch_param(lookup_table)
        gen_mac = lookup_table.get_model_flops(arch_param.cuda())
        layers_config = lookup_table.decode_arch_param(arch_param)

        model = Model(layers_config, CONFIG.dataset, CONFIG.classes)

        cal_model_efficient(model, CONFIG)
Example #15
    def extract_sift(self, image_name, process_record=False):
        """
        Calculate SIFT features for an image.
        :param image_name: file name of the image
        :param process_record: if True, return the raw keypoint/descriptor record
        :return:
        """

        if not validate.validate_image(self.folder, image_name):
            raise Exception('File is not valid')

        img = cv2.imread(os.path.join(self.folder, image_name))
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        sft = cv2.xfeatures2d.SIFT_create()
        kp, des = sft.detectAndCompute(gray_img, None)
        if process_record:
            return {
                'imageId':
                image_name,
                'kps': [{
                    'x': k.pt[0],
                    'y': k.pt[1],
                    'size': k.size,
                    'angle': k.angle,
                    'response': k.response
                } for k in kp],
                'featureVector': [i.tolist() for i in des],
                "path":
                os.path.abspath(os.path.join(self.folder, image_name))
            }
        model_file = "{}_{}_{}".format(self.folder, self.model.lower(),
                                       self.constants.BOW_MODEL.lower())

        if validate.validate_file(
                os.path.join(self.constants.MODELS_FOLDER, model_file)):
            model = Model()
            knn = model.load_model(model_file)
            histogram = np.zeros(knn.n_clusters)
            for desc in des:
                index = knn.predict([desc])
                histogram[index] += 1 / len(kp)
            return histogram
        return des
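The bag-of-visual-words model loaded via model.load_model(model_file) above is expected to expose n_clusters and predict; one plausible way to build such a vocabulary is to cluster SIFT descriptors from the whole collection with KMeans. A minimal sketch under that assumption (build_bow_vocabulary and its inputs are hypothetical, not part of this code base):

import numpy as np
from sklearn.cluster import KMeans

def build_bow_vocabulary(descriptor_list, n_clusters=300):
    # descriptor_list: list of (n_i, 128) SIFT descriptor arrays, one per image.
    all_descriptors = np.vstack(descriptor_list)
    kmeans = KMeans(n_clusters=n_clusters, random_state=0)
    kmeans.fit(all_descriptors)
    return kmeans  # exposes .n_clusters and .predict, as used in extract_sift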
Example #16
    def query(self, query_id, top):
        model = Model()
        overall_count = 0
        constants = GlobalConstants()
        l_hash, l_bucket = model.load_model(
            constants.LSH_L_HASHES), model.load_model(constants.LSH_L_BUCKETS)
        choices_per_layer, choices_per_hash = [], []
        query_vector = self.data[self.image_ids.index(query_id)]
        imageid_distances = []

        for layer in range(self.layers):
            k_hash = l_hash["L{}".format(layer)]
            k_bucket = l_bucket["L{}".format(layer)]
            choices_per_hash = []

            for kid in range(self.khash_count):
                choices_per_hash.append(k_bucket["K{}".format(kid)].get(
                    k_hash["K{}".format(kid)].get(query_id)))
            intersected_choices = set.intersection(*map(set, choices_per_hash))
            overall_count += len(intersected_choices)
            choices_per_layer.append(intersected_choices)
        choices = set.union(*map(set, choices_per_layer))

        for image_id in choices:
            if image_id != query_id:
                vector = self.data[self.image_ids.index(image_id)]
                imageid_distances.append(
                    (image_id, euclidean(query_vector, vector)))

        imageid_distances = sorted(imageid_distances,
                                   key=operator.itemgetter(1))
        choices = [image for (image, distance) in imageid_distances]

        if len(choices) < top:
            # new_choices = search_neighbors()
            pass

        feat_vectors = {}
        print("Overall images: {}".format(overall_count))
        print("Unique images: {}".format(len(choices)))
        for i in choices[:top]:
            feat_vectors[i] = self.data[self.image_ids.index(i)]
        return choices[:top], feat_vectors, query_vector
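The L x K hash structures consulted by this method could, for instance, be built with random-hyperplane hashing; a minimal sketch of a single layer under that assumption (build_hash_layer is hypothetical and not necessarily the scheme this class actually uses):

from collections import defaultdict
import numpy as np

def build_hash_layer(data, image_ids, k=5, seed=0):
    # Hash each vector to a k-bit bucket key using k random hyperplanes.
    rng = np.random.RandomState(seed)
    planes = rng.randn(k, data.shape[1])
    keys, buckets = {}, defaultdict(list)
    for image_id, vector in zip(image_ids, data):
        key = tuple((planes @ vector > 0).astype(int))
        keys[image_id] = key
        buckets[key].append(image_id)
    return keys, buckets  # roughly analogous to one layer of l_hash / l_bucket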
Example #17
def get_model(config_path,
              target_flops,
              num_classes=1000,
              in_chans=3,
              activation="relu",
              se=False,
              bn_momentum=0.1):
    CONFIG = get_config(config_path)
    if CONFIG.cuda:
        device = torch.device("cuda" if (
            torch.cuda.is_available() and CONFIG.ngpu > 0) else "cpu")
    else:
        device = torch.device("cpu")

    lookup_table = LookUpTable(CONFIG)

    supernet = Supernet(CONFIG)
    arch_param_nums = supernet.get_arch_param_nums()

    generator = get_generator(CONFIG, arch_param_nums)

    if CONFIG.generator_pretrained is not None:
        generator.load_state_dict(
            torch.load(CONFIG.generator_pretrained)["model"])

    generator.to(device)
    prior_pool = PriorPool(lookup_table, arch_param_nums, None, None, None,
                           CONFIG)

    # Sample architecture parameter =======================
    prior = prior_pool.get_prior(target_flops)
    prior = prior.to(device)

    hardware_constraint = torch.tensor(target_flops).to(device)
    normalize_hardware_constraint = min_max_normalize(CONFIG.high_flops,
                                                      CONFIG.low_flops,
                                                      hardware_constraint)

    arch_param = generator(prior, normalize_hardware_constraint)
    arch_param = lookup_table.get_validation_arch_param(arch_param)

    gen_flops = lookup_table.get_model_flops(arch_param)

    logging.info("Generate flops : {}".format(gen_flops))

    layers_config = lookup_table.decode_arch_param(arch_param)
    model = Model(l_cfgs=layers_config,
                  dataset=CONFIG.dataset,
                  classes=CONFIG.classes,
                  activation=activation,
                  se=se,
                  bn_momentum=bn_momentum)

    cal_model_efficient(model, CONFIG)
    return model
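min_max_normalize is used above to map the FLOPs target into the range the generator was trained on; a hedged sketch of that idea only, following the (high, low, value) argument order of the call (the project's actual helper may differ):

def min_max_normalize_sketch(high, low, value):
    # Scale value into [0, 1] relative to the [low, high] FLOPs range.
    return (value - low) / (high - low)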
Example #18
def main():
    args = get_args()
    conf = __import__("config." + args.config, globals(), locals(), ["Conf"]).Conf
    helper = Helper(conf=conf)

    data = Data(conf)
    data.load_data()
    # you need to setup: data.train_loader/data.test_loader

    model = Model(conf).to(conf.device)
    print(model)
    training(conf, model, data.train_loader)
Example #19
def main(exp_const,
         data_const_train,
         data_const_val,
         model_const,
         data_sign='hico'):
    io.mkdir_if_not_exists(exp_const.exp_dir, recursive=True)
    io.mkdir_if_not_exists(exp_const.log_dir)
    io.mkdir_if_not_exists(exp_const.model_dir)
    configure(exp_const.log_dir)
    save_constants(
        {
            'exp': exp_const,
            'data_train': data_const_train,
            'data_val': data_const_val,
            'model': model_const
        }, exp_const.exp_dir)

    print('Creating model ...')
    model = Model()
    model.const = model_const
    model.hoi_classifier = HoiClassifier(model.const.hoi_classifier,
                                         data_sign).cuda()
    model.to_txt(exp_const.exp_dir, single_file=True)

    print('Creating data loaders ...')
    dataset_train = Features(data_const_train)
    dataset_val = Features(data_const_val)

    train_model(model, dataset_train, dataset_val, exp_const)
Example #20
def main():
    device = cfg.DEVICE

    data = dataset.KITTIDataset(cfg.PATH, cfg, mode='eval')
    val_dataloader = DataLoader(data, cfg.VAL_BATCH, shuffle=False)

    # Initialize the model
    mobilenetv2 = MobileNetV2()
    model = Model(features=mobilenetv2.features, bins=cfg.BIN).to(device)
    model_list = os.listdir(cfg.MODEL_DIR)
    # model.load_state_dict(torch.load(cfg.MODEL_DIR + "/%s" % sorted(model_list)[-1]))
    model.load_state_dict(
        torch.load(cfg.MODEL_DIR + '/model_2020-05-13-10-06-12.pth'))
    print(sorted(model_list)[-1])
    model.eval()

    for i, (batch, info) in enumerate(val_dataloader):
        # batch, centerAngle, info = data.EvalBatch()
        batch = torch.FloatTensor(batch).to(device)

        [orient, conf] = model(batch)
        orient = orient.cpu().data.numpy()
        conf = conf.cpu().data.numpy()

        alpha = angle_utils.recover_angle(conf.squeeze(0), orient.squeeze(0),
                                          cfg.BIN)
        # P2 is assumed to be defined at module level (camera projection matrix).
        rot_y = angle_utils.compute_orientaion(P2, info['bbox2d'], alpha)
        angle_utils.save_result(info, alpha, rot_y, i, mode='eval')
Example #21
def main(exp_const, data_const, model_const):
    io.mkdir_if_not_exists(exp_const.exp_dir, recursive=True)
    io.mkdir_if_not_exists(exp_const.log_dir)
    io.mkdir_if_not_exists(exp_const.model_dir)
    configure(exp_const.log_dir)
    save_constants({
        'exp': exp_const,
        'data': data_const,
        'model': model_const
    }, exp_const.exp_dir)

    print('Creating model ...')
    model = Model()
    model.const = model_const
    model.concat_svm = ConcatSVM(model_const.concat_svm).cuda()
    model.to_txt(exp_const.exp_dir, single_file=True)

    print('Creating train data loader ...')
    train_data_const = copy.deepcopy(data_const)
    train_data_const.subset = 'train'
    train_data_loader = DataLoader(SemEval201810Dataset(train_data_const),
                                   batch_size=exp_const.batch_size,
                                   shuffle=True)

    print('Creating val data loader ...')
    val_data_const = copy.deepcopy(data_const)
    val_data_const.subset = 'val'
    val_data_loader = DataLoader(SemEval201810Dataset(val_data_const),
                                 batch_size=exp_const.batch_size,
                                 shuffle=False)

    print('Begin training ...')
    train_model(model, train_data_loader, val_data_loader, exp_const)
Example #22
def main(exp_const, data_const, model_const):
    print('Creating model ...')
    model = Model()
    model.const = model_const
    model.concat_svm = ConcatSVM(model_const.concat_svm).cuda()

    print('Select best model ...')
    step_to_val_best_scores_tuple_json = os.path.join(
        exp_const.exp_dir, 'step_to_val_best_scores_tuple.json')
    step_to_val_best_scores_tuple = io.load_json_object(
        step_to_val_best_scores_tuple_json)
    best_step, best_scores_tuple = select_best_concat_svm(
        step_to_val_best_scores_tuple)
    model.concat_svm.const.thresh = best_scores_tuple[4]
    model_pth = os.path.join(exp_const.model_dir, f'{best_step}')
    model.concat_svm.load_state_dict(torch.load(model_pth))
    print(f'Selected model at step: {best_step}')
    print(f'Selected thresh: {model.concat_svm.const.thresh}')

    print('Creating data loader ...')
    data_const = copy.deepcopy(data_const)
    data_loader = DataLoader(SemEval201810Dataset(data_const),
                             batch_size=exp_const.batch_size,
                             shuffle=False)

    print('Begin evaluation ...')
    result, correct_preds, incorrect_preds = eval_model(
        model, data_loader, exp_const)
    result_json = os.path.join(exp_const.exp_dir,
                               f'results_{data_const.subset}.json')
    io.dump_json_object(result, result_json)
    print(io.dumps_json_object(result))

    correct_preds_json = os.path.join(
        exp_const.exp_dir, f'correct_preds_{data_const.subset}.json')
    io.dump_json_object(correct_preds, correct_preds_json)

    incorrect_preds_json = os.path.join(
        exp_const.exp_dir, f'incorrect_preds_{data_const.subset}.json')
    io.dump_json_object(incorrect_preds, incorrect_preds_json)
Example #23
def main(exp_const, data_const, model_const):
    io.mkdir_if_not_exists(exp_const.vis_dir)

    print('Creating network ...')
    model = Model()
    model.const = model_const

    model.net = ResnetModel(model.const.net)
    if model.const.model_num is not None:
        model.net.load_state_dict(torch.load(model.const.net_path))
    model.net.cuda()

    if not exp_const.feedforward:
        model.AttributeEmbeddings = AttributeEmbeddings(
            model.const.AttributeEmbeddings)
        if model.const.model_num is not None:
            model.AttributeEmbeddings.load_state_dict(
                torch.load(model.const.AttributeEmbeddings_path))
        model.AttributeEmbeddings.cuda()

    model.img_mean = np.array([0.485, 0.456, 0.406])
    model.img_std = np.array([0.229, 0.224, 0.225])

    print('Creating dataloader ...')
    dataset = Cifar100Dataset(data_const)
    dataloader = DataLoader(dataset,
                            batch_size=exp_const.batch_size,
                            shuffle=True,
                            num_workers=exp_const.num_workers)

    eval_results = eval_model(model, dataloader, exp_const)

    confmat_npy = os.path.join(exp_const.exp_dir, 'confmat.npy')
    np.save(confmat_npy, eval_results['Conf Mat'])

    results = {
        'Avg Loss': eval_results['Avg Loss'],
        'Acc': eval_results['Acc']
    }

    print(results)
    results_json = os.path.join(exp_const.exp_dir, 'results.json')
    io.dump_json_object(results, results_json)

    embeddings_npy = os.path.join(exp_const.exp_dir, 'embeddings.npy')
    if exp_const.feedforward:
        np.save(embeddings_npy,
                model.net.resnet_layers.fc.weight.data.cpu().numpy())
    else:
        np.save(embeddings_npy,
                model.AttributeEmbeddings.embed.weight.data.cpu().numpy())

    labels_npy = os.path.join(exp_const.exp_dir, 'labels.npy')
    np.save(labels_npy, dataset.labels)
Example #24
def main(exp_const, data_const, model_const):
    io.mkdir_if_not_exists(exp_const.exp_dir, recursive=True)
    io.mkdir_if_not_exists(exp_const.log_dir)
    io.mkdir_if_not_exists(exp_const.model_dir)
    io.mkdir_if_not_exists(exp_const.vis_dir)
    configure(exp_const.log_dir)
    save_constants({
        'exp': exp_const,
        'data': data_const,
        'model': model_const
    }, exp_const.exp_dir)

    print('Creating network ...')
    model = Model()
    model.const = model_const
    model.net = ResnetModel(model.const.net)
    model.embed2class = Embed2Class(model.const.embed2class)
    if model.const.model_num is not None:
        model.net.load_state_dict(torch.load(model.const.net_path))
        model.embed2class.load_state_dict(
            torch.load(model.const.embed2class_path))
    model.net.cuda()
    model.embed2class.cuda()
    model.img_mean = np.array([0.485, 0.456, 0.406])
    model.img_std = np.array([0.229, 0.224, 0.225])
    model.to_file(os.path.join(exp_const.exp_dir, 'model.txt'))

    print('Creating dataloader ...')
    dataloaders = {}
    for mode, subset in exp_const.subset.items():
        data_const = copy.deepcopy(data_const)
        if subset == 'train':
            data_const.train = True
        else:
            data_const.train = False
        dataset = Cifar100Dataset(data_const)
        collate_fn = dataset.get_collate_fn()
        dataloaders[mode] = DataLoader(dataset,
                                       batch_size=exp_const.batch_size,
                                       shuffle=True,
                                       num_workers=exp_const.num_workers,
                                       collate_fn=collate_fn)

    train_model(model, dataloaders, exp_const)
Example #25
def main(exp_const, data_const, model_const):
    print('Creating network ...')
    model = Model()
    model.const = model_const
    model.net = LogBilinear(model.const.net)
    if model.const.model_num is not None:
        model.net.load_state_dict(torch.load(model.const.net_path))

    embeddings = 0.5 * (model.net.embed1.W.weight + model.net.embed2.W.weight)

    print('Computing transformed embeddings ...')
    xformed_embeddings = []
    for cooccur_type in exp_const.cooccur_types:
        xform = getattr(model.net, f'xform_{cooccur_type}')
        xformed_embeddings.append(xform(embeddings).cpu().data.numpy())
        print(cooccur_type, xformed_embeddings[-1].shape)

    xformed_embeddings = np.concatenate(xformed_embeddings, 1)
    print('Concatenated xformed embedding shape', xformed_embeddings.shape)
    xformed_embeddings_json = os.path.join(exp_const.exp_dir,
                                           'visual_embeddings_xformed.npy')
    np.save(xformed_embeddings_json, xformed_embeddings)
Example #26
def find_best_model(options, scaled_points, random_polynomials):
    divisions = utils.create_divisions(options.divisions_quantity,
                                       scaled_points, options.train_percents)
    lambdas = utils.create_lambdas()
    models = []
    for degree in range(2, options.max_polynomial_degree):
        polynomial_with_teta = random_polynomials[degree]
        for lmbda in lambdas:
            model = Model(degree, lmbda, options)
            mistake = 0
            for division in divisions:
                division_mistake = model.calc_division_mistake(division, polynomial_with_teta)
                mistake += division_mistake
                model.mistakes.append(division_mistake)
            model.mistake = mistake
            models.append(model)

    utils.sort_models_by_mistake(models)
    # utils.show_models(models)

    best_model = models[0]
    best_model.lmbda = 0
    return best_model
Example #27
def thread_session(thread_id, queue_input, queue_output):
    gpu = args.list_gpus[thread_id]
    with tf.device(gpu):
        opti_adam = build_optimizer(args, type='adam')
        model = Model(args, optimizer=opti_adam, name='fc' + str(thread_id))
        print('thread_{} is waiting to run on {}....'.format(thread_id, gpu))
        while True:
            # s = time()
            x, y, weights = queue_input.get()
            model.set_weights(weights)
            # t = time()
            logits = model(x, training=False)
            loss = model.align_loss(logits, y, args.dim_output, confidence=0.9)
            queue_output.put((loss, thread_id))
Example #28
def evaluate_arch_param(evaluate_trainer, supernet, generator, train_loader,
                        test_loader, parameter_metric, backbone_pool,
                        lookup_table, device, CONFIG):
    avg_metric = {"gen_macs": [], "avg": [], "supernet_avg": []}
    for a in range(parameter_metric.shape[0]):
        arch_param = torch.tensor(
            parameter_metric.iloc[a].values.reshape(19, -1), dtype=torch.float)
        layers_config = lookup_table.decode_arch_param(arch_param)

        model = Model(layers_config, CONFIG.dataset, CONFIG.classes)
        model = model.to(device)
        if device.type == "cuda" and CONFIG.ngpu >= 1:
            model = nn.DataParallel(model, list(range(CONFIG.ngpu)))

        macs = cal_model_efficient(model, CONFIG)
        arch_param, hardware_constraint = evaluate_trainer.set_arch_param(
            generator, supernet, hardware_constraint=macs, arch_param=arch_param)
        supernet_avg, _ = evaluate_trainer.generator_validate(
            generator, supernet, test_loader, 0, 0,
            hardware_constraint=hardware_constraint, arch_param=arch_param,
            sample=False)
        
        avg_accuracy = 0
        """
        for t in range(CONFIG.train_time):
            model = Model(layers_config, CONFIG.dataset, CONFIG.classes)
            model = model.to(device)
            if (device.type == "cuda" and CONFIG.ngpu >= 1) :
                model = nn.DataParallel(model, list(range(CONFIG.ngpu)))

            optimizer = get_optimizer(model, CONFIG.optim_state)
            scheduler = get_lr_scheduler(optimizer, len(train_loader), CONFIG)

            trainer = Trainer(criterion, None, optimizer, None, scheduler, writer, device, lookup_table, backbone_pool, CONFIG)
            top1_avg = trainer.train_loop(train_loader, test_loader, model)

            avg_accuracy += top1_avg
        """

        avg_accuracy /= CONFIG.train_time
        avg_metric["gen_macs"].append(macs)
        avg_metric["avg"].append(avg_accuracy)
        avg_metric["supernet_avg"].append(supernet_avg)
    return avg_metric
Example #29
def main(exp_const, data_const, model_const):
    print('Loading model ...')
    model = Model()
    model.const = model_const
    model.hoi_classifier = HoiClassifier(model.const.hoi_classifier).cuda()
    if model.const.model_num == -1:
        print(
            'No pretrained model will be loaded since model_num is set to -1')
    else:
        state = torch.load(model.const.hoi_classifier.model_pth)
        state = {
            kk: state[kk]
            for kk in state.keys() if 'mlp.embedding' not in kk
        }
        #pdb.set_trace()
        model.hoi_classifier.load_state_dict(state)
        #model.hoi_classifier.load_state_dict(
        #    torch.load(model.const.hoi_classifier.model_pth))

    print('Creating data loader ...')
    dataset = Features(data_const)

    eval_model(model, dataset, exp_const)
Example #30
    def __init__(self, config_file):

        config = json.loads(open(config_file).read())

        self.cropper = Cropper(config['parking_map'], Model('model.h5'))

        self.mqtt = MqttSender('sender', config['mqtt'])

        self.predictions = None
        self.take_next = False
        self.thread = None

        self.delay = config['delay']
        self.last_take_time = time() - self.delay
        self.dir_name = config['source_directory']