Example #1
    def __init__(self, modeluse):
        import torch
        from utils.utils import load_model
        from modeling.TP_Net import Res160, Res320
        from modeling.Hourglass import HourglassNet

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if device.type == 'cpu':
            raise RuntimeError('cpu version for training is not implemented.')
        print('Using device: ', device)
        self.head = {'center': 1, 'dis': 4, 'line': 1}
        if modeluse == 'tp320':
            self.model = load_model(Res320(self.head),
                                    './pretraineds/Res320.pth')
            self.in_res = (320, 320)
        elif modeluse == 'tplite':
            self.model = load_model(Res160(self.head),
                                    './pretraineds/Res160.pth')
            self.in_res = (320, 320)
        elif modeluse == 'tp512':
            self.model = load_model(Res320(self.head),
                                    './pretraineds/Res512.pth')
            self.in_res = (512, 512)
        elif modeluse == 'hg':
            self.model = load_model(HourglassNet(self.head),
                                    './pretraineds/HG128.pth')
            self.in_res = (512, 512)
        else:
            raise ValueError(
                'Please specify a valid model (options: tp320, tplite, tp512, hg).'
            )

        self.model = self.model.cuda().eval()
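A minimal usage sketch; the wrapper class name `Detector` is hypothetical, since the snippet only shows `__init__`:

# Hypothetical usage: 'Detector' stands in for whatever class owns this __init__.
detector = Detector(modeluse='tp320')  # loads ./pretraineds/Res320.pth, input resolution (320, 320)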
Example #2
  def tester(self):
    (X_test, y_test, mask_test, mem_test, unk_test) = self.testdata

    best_models = []
    for i in range(1, 5):  # load the four best checkpoints, indexed 1-4
      model = self.Model(self.args).to(self.args.device)
      load_model(model, self.args, index=i)
      model.eval()
      best_models.append(model)

    test_loss, confusion_test, confusion_mem_test, (alphas, targets, seq_lengths) = self.run_test(best_models, X_test, y_test, mask_test, mem_test, unk_test)
    
    print("ENSAMBLE TEST RESULTS")
    print(confusion_test)
    print(confusion_mem_test)
    print("test accuracy:\t\t{:.2f} %".format(confusion_test.accuracy() * 100))
    print("test Gorodkin:\t\t{:.4f}".format(gorodkin(confusion_test.ret_mat())))
    print("test mem accuracy:\t{:.2f} %".format(confusion_mem_test.accuracy() * 100))
    print("test mem MCC:\t\t{:.4f}".format(confusion_mem_test.MCC()))

    self.results.set_final(
      alph = alphas.cpu().detach().numpy(), 
      seq_len = seq_lengths.cpu().detach().numpy(), 
      targets = targets, 
      cf = confusion_test.ret_mat(),
      cf_mem = confusion_mem_test.ret_mat(), 
      acc = confusion_test.accuracy(), 
      acc_mem = confusion_mem_test.accuracy())
    
    save_results(self.results, self.args)
Example #3
def main(config):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=config.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            detector = MTCNN(config, sess)
            # Load the model
            load_model(config.lfw.valid_model_path)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            # Run forward pass to calculate embeddings

            with open(config.parse_path) as f:
                names = f.readlines()

            for name in names:
                keyword, name, saved_dir = get_keyword(name, config.output_dir)
                fanye_url = url_init_first + urllib.parse.quote(keyword, safe='/')  # urllib.parse in Python 3
                feats = []
                page = 0
                while len(os.listdir(saved_dir)) < 20 and fanye_url != '':
                    onepage_urls, fanye_url = get_onepage_urls(fanye_url)
                    for index, url in enumerate(onepage_urls):
                        img_path = download(url,
                                            page * len(onepage_urls) + index,
                                            saved_dir)
                        try:
                            img = misc.imread(img_path)
                            nrof_faces, face = detect(img, detector, config)
                            face = np.expand_dims(face, 0)
                            if nrof_faces:
                                feed_dict = {
                                    images_placeholder: face,
                                    phase_train_placeholder: False
                                }
                                feats.append(
                                    sess.run(embeddings, feed_dict=feed_dict))
                                if index != 0:
                                    dist = np.sqrt(
                                        np.sum(
                                            np.square(
                                                np.subtract(
                                                    feats[0], feats[-1]))))
                                    print('the %d-th %s image dist: %f' %
                                          (index, name, dist))
                                    if dist > config.max_dist:
                                        os.remove(img_path)
                        except Exception as e:
                            print(e)
                            if img_path:
                                os.remove(img_path)
                            continue
                    page += 1  # advance to the next results page for this keyword
Example #4
    def tester(self):
        model = self.Model(self.args).to(self.args.device)
        load_model(model, self.args)

        test_loss, test_accuracy = self.run_test(model=model)

        print('| Test | loss {:.4f} | acc {:.2f}%'
              ' |'.format(test_loss, test_accuracy * 100))
        print('-' * 79)
Example #5
def main(config):

    with tf.Graph().as_default():

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=config.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pairs = lfw.read_pairs(
                os.path.expanduser(config.lfw.lfw_pairs_path)
            )  # Read the file containing the pairs used for testing
            paths, actual_issame = lfw.get_paths(
                config.input_dir, pairs,
                'png')  # Get the paths for the corresponding images
            load_model(config.lfw.valid_model_path)  # Load the model
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = config.lfw.image_size
            embedding_size = embeddings.get_shape()[1]

            print('Running forward pass on LFW images')
            batch_size = config.batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = load_data(paths_batch, image_size, False, False)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=config.lfw.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' %
                  (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                  (val, val_std, far))

            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x),
                         0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
Example #6
    def __init__(self, model_path, confidence_threshold=0.7):
        self.net = get_hourglass(cfg.arch,
                                 num_classes=cfg.num_classes,
                                 is_training=False)
        load_model(self.net, model_path)
        self.net = self.net.to(cfg.device)
        self.net.eval()

        self.nms_threshold = 0.4
        self.confidence_threshold = confidence_threshold
Example #7
    def play(self, load_model=True, test_ep=None, num_step=100000, num_episodes=200, display=True):
        model_q = Model()
        model_target_q = Model(is_target_q=True)
        end_points_q = model_q.model_def(self.inputs, self.env, name='main_q')
        _ = model_target_q.model_def(
            self.target_inputs, self.env, name='target_q')

        init = tf.global_variables_initializer()
        self.saver = tf.train.Saver(max_to_keep=None)

        if load_model:
            utils.load_model(self.saver, self.sess, self.model_dir)
        else:
            self.sess.run(init)

        if test_ep is None:
            test_ep = self.cfg.ep_test

        if not display:
            gym_dir = '/tmp/%s-%s' % (self.cfg.env_name, utils.get_time())
            self.env.env.monitor.start(gym_dir)

        best_reward, best_episode = 0, 0
        for episode in range(num_episodes):
            screen, reward, action, terminal = self.env.new_random_game()
            current_reward = 0

            for _ in range(self.cfg.history_length):
                self.history.add(screen)

            for t in tqdm(range(num_step), ncols=70):
                # 1. predict
                action = self.predict(
                    end_points_q['pred_action'], self.history.get(), ep=test_ep)
                # 2. act
                screen, reward, terminal = self.env.act(
                    action, is_training=False)
                # 3. observe
                self.history.add(screen)

                current_reward += reward
                if terminal:
                    break

            if current_reward > best_reward:
                best_reward = current_reward
                best_episode = episode

            print " [%d] Best reward : %d" % (best_episode, best_reward)

        if not display:
            self.env.env.monitor.close()
Example #8
    def load_model(self, model_file):
        # 'load_model' below resolves to the module-level helper, not this method
        data = load_model(model_file)
        self.word2id = data['word2id']
        self.label2id = data['label2id']
        self.initial_proba = data['initial_proba']
        self.observation_proba = data['observation_proba']
        self.transition_proba = data['transition_proba']
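The snippet assumes a module-level `load_model` helper that returns a dict of HMM parameters. A minimal sketch of such a helper, assuming the file is a pickle (the storage format is not shown in the original):

import pickle

def load_model(model_file):
    # Assumed helper: deserialize a dict with keys word2id, label2id,
    # initial_proba, observation_proba and transition_proba.
    with open(model_file, 'rb') as f:
        return pickle.load(f)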
Example #9
def main(arguments):
    model_parameters = {
        'scale': arguments['scale'],
        'learning_rate': 1e-5,
        'D': arguments['D'],
        'C': arguments['C'],
        'G': arguments['G'],
        'kernel_size': 3,
        'c_dim': 3,
        'G0': arguments['G0'],
    }

    model = load_model(model_parameters,
                       arguments['vgg'],
                       verbose=arguments['verbose'])

    if arguments['summary'] is True:
        model.rdn.summary()

    if arguments['train'] is True:
        from trainer.train import Trainer

        trainer = Trainer(train_arguments=arguments)
        trainer.train_model(model)

    if arguments['test'] is True:
        from predict.predict import Predictor

        predictor = Predictor(test_arguments=arguments)
        predictor.get_predictions(model)
Example #10
def train_epoch(model, optimizer, args, hp, step, start_epoch=0, rank=0):
    dataloader = get_dataloader(step, args, hp)

    for epoch in range(start_epoch, hp.max_epoch):
        start_time = time.time()
        if step // hp.accum_grad > hp.warmup_step:
            dataloader = get_dataloader(step, args, hp)

        step = train_loop(model, optimizer, step, epoch, args, hp, rank, dataloader)
 
        print("EPOCH {} end".format(epoch+1))
        print('elapsed time = {}'.format(time.time() - start_time))
        if hp.dev_script is not None:
            if ((epoch >= 20 and epoch % 5 == 0) or epoch == 1) and rank == 0:
                print('eval')
                map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
                loaded_dict = load_model("{}".format(os.path.join(hp.save_dir, 'network.epoch{}'.format(epoch+1))), map_location=map_location)
                model_dev = Transformer(hp)
                model_dev.load_state_dict(loaded_dict)
                model_dev.eval()
                model_dev.to(rank)
                wer_all = recognize(hp, model_dev, model_lm=None, lm_weight=0.0, calc_wer=True, rank=rank)
                print(f'{args.hp_file} epoch {epoch} wer is {wer_all}')

                del model_dev
Example #11
def _test(parameters):
    model_folder = setup_log(parameters, parameters['save_pred'] + '_test')

    print('\nLoading mappings ...')
    train_loader = load_mappings(parameters['remodelfile'])

    print('\nLoading testing data ...')
    processed_dataset = parameters['remodelfile']
    if os.path.exists(os.path.join(processed_dataset, 'test_test_data.pkl')):
        with open(os.path.join(processed_dataset, 'test_test_data.pkl'), 'rb') as f:
            test_data = pkl.load(f)
        with open(os.path.join(processed_dataset, 'test_prune_recall.pkl'), 'rb') as f:
            prune_recall = pkl.load(f)
    else:
        test_loader = DataLoader(parameters['test_data'], parameters, train_loader)
        test_loader(parameters=parameters)
        test_data, prune_recall = DocRelationDataset(test_loader, 'test', parameters, train_loader).__call__()
        with open(os.path.join(processed_dataset, 'test_test_data.pkl'), 'wb') as f:
            pkl.dump(test_data, f, pkl.HIGHEST_PROTOCOL)
        with open(os.path.join(processed_dataset, 'test_prune_recall.pkl'), 'wb') as f:
            pkl.dump(prune_recall, f, pkl.HIGHEST_PROTOCOL)
    m = Trainer(train_loader, parameters, {'train': [], 'test': test_data}, model_folder, prune_recall)
    trainer = load_model(parameters['remodelfile'], m)
    _, _, _, p, r = trainer.eval_epoch(final=True, save_predictions=True)
    print('Saving test metrics ... ', end="")
    np.savetxt(parameters['remodelfile'] + "/p.txt", p)
    np.savetxt(parameters['remodelfile'] + "/r.txt", r)
    print('DONE')
Example #12
def main():
    print('loading model:')
    model = load_model(weight_path)
    print('loading dataset:')
    test_loader = torch.utils.data.DataLoader(
        CustomDataset(batch_size, path, 'test'),
        batch_size, shuffle=False, num_workers=1)
    print('number of batches:', len(test_loader))  # len(DataLoader) counts batches, not samples
    test_model(model, test_loader)
Example #13
def main(config):

    if isinstance(config.compared_imgs, str):
        imgs = config.compared_imgs.split(' ')
    else:
        imgs = config.compared_imgs

    images = load_and_align_data(config, imgs)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            load_model(config.lfw.valid_model_path)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            # Run forward pass to calculate embeddings
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(imgs)

            print('Images:')
            for i in range(nrof_images):
                print('%1d: %s' % (i, imgs[i]))
            print('')

            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    dist = np.sqrt(
                        np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
                    print('  %1.4f  ' % dist, end='')
                print('')
Example #14
def final_evaluation(train_loader, test_loader, valid_loader,
                     best_model_path_load, model, optimizer, args, dir):
    _ = load_model(best_model_path_load, model, optimizer)
    model.eval()
    exemplars_embedding = load_all_pseudo_input(args, model,
                                                train_loader.dataset)
    test_samples = next(iter(test_loader))[0].to(args.device)
    visualize_reconstruction(test_samples, model, args, dir)
    visualize_generation(train_loader.dataset, model, args, dir)
    test_elbo, test_re, test_kl = evaluate_loss(
        args,
        model,
        test_loader,
        dataset=train_loader.dataset,
        exemplars_embedding=exemplars_embedding)
    valid_elbo, valid_re, valid_kl = evaluate_loss(
        args,
        model,
        valid_loader,
        dataset=valid_loader.dataset,
        exemplars_embedding=exemplars_embedding)
    train_elbo, _, _ = evaluate_loss(args,
                                     model,
                                     train_loader,
                                     dataset=train_loader.dataset,
                                     exemplars_embedding=exemplars_embedding)
    test_log_likelihood = calculate_likelihood(
        args,
        model,
        test_loader,
        exemplars_embedding=exemplars_embedding,
        S=args.S)
    final_evaluation_txt = 'FINAL EVALUATION ON TEST SET\n' \
                           'LogL (TEST): {:.2f}\n' \
                           'LogL (TRAIN): {:.2f}\n' \
                           'ELBO (TEST): {:.2f}\n' \
                           'ELBO (TRAIN): {:.2f}\n' \
                           'ELBO (VALID): {:.2f}\n' \
                           'RE: {:.2f}\n' \
                           'KL: {:.2f}'.format(
        test_log_likelihood,
        0,  # train log-likelihood is not computed here; placeholder value
        test_elbo,
        train_elbo,
        valid_elbo,
        test_re,
        test_kl)

    print(final_evaluation_txt)
    with open(dir + 'vae_experiment_log.txt', 'a') as f:
        print(final_evaluation_txt, file=f)
    torch.save(test_log_likelihood,
               dir + args.model_name + '.test_log_likelihood')
    torch.save(test_elbo, dir + args.model_name + '.test_loss')
    torch.save(test_re, dir + args.model_name + '.test_re')
    torch.save(test_kl, dir + args.model_name + '.test_kl')
Example #15
    def __init__(self, cfg):
        self.cfg = cfg
        if cfg.arch == 'resnet50':
            self.model = get_pose_net(50, 64, cfg.num_classes)
        else:
            self.model = get_hourglass(cfg.arch, num_classes=cfg.num_classes)

        if cfg.pretrained_weights is not None:
            weight_file = os.path.join(cfg.save_folder, cfg.pretrained_weights)
            load_model(self.model, weight_file)
            print("load pretrain mode:{}".format(weight_file))

        if cfg.num_gpu > 1 and torch.cuda.is_available():
            self.model = torch.nn.DataParallel(self.model).cuda()
        else:
            self.model = self.model.to(cfg.device)
        self.save_folder = cfg.ckpt_dir
        self.optim = optim.Adam(self.model.parameters(), lr=cfg.lr)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optim, gamma=0.99)
Example #16
    def __init__(self):
        import torch
        from utils.utils import load_model
        from modeling.TP_Net import Res160, Res320
        from modeling.Hourglass import HourglassNet

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if device.type == 'cpu':
            raise RuntimeError('cpu version for training is not implemented.')
        print('Using device: ', device)
        self.head = {'center': 1, 'dis': 4, 'line': 1}
        self.model = load_model(Res320(self.head), './pretraineds/Res320.pth',
                                False, False)
        self.model = self.model.cuda().eval()
        self.in_res = (320, 320)
Example #17
def get_model(state, args, init_model_name=None):
    if init_model_name is not None and os.path.exists(init_model_name):
        model, optimizer, state = load_model(init_model_name,
                                             return_optimizer=True,
                                             return_state=True)
    else:
        if "conv_dropout" in args:
            conv_dropout = args.conv_dropout
        else:
            conv_dropout = cfg.conv_dropout
        cnn_args = [1]  # positional arguments unpacked into CNN(*cnn_args, ...)

        if args.fixed_segment is not None:
            frames = cfg.frames
        else:
            frames = None

        nb_layers = 4
        cnn_kwargs = {
            "activation": cfg.activation,
            "conv_dropout": conv_dropout,
            "batch_norm": cfg.batch_norm,
            "kernel_size": nb_layers * [3],
            "padding": nb_layers * [1],
            "stride": nb_layers * [1],
            "nb_filters": [16, 16, 32, 65],
            "pooling": [(2, 2), (2, 2), (1, 4), (1, 2)],
            "aggregation": args.agg_time,
            "norm_out": args.norm_embed,
            "frames": frames,
        }
        nb_frames_staying = cfg.frames // (2**2)
        model = CNN(*cnn_args, **cnn_kwargs)
        # model.apply(weights_init)
        state.update({
            'model': {
                "name": model.__class__.__name__,
                'args': cnn_args,
                "kwargs": cnn_kwargs,
                'state_dict': model.state_dict()
            },
            'nb_frames_staying': nb_frames_staying
        })
        if init_model_name is not None:
            save_model(state, init_model_name)
    pytorch_total_params = sum(p.numel() for p in model.parameters()
                               if p.requires_grad)
    LOG.info(
        "number of parameters in the model: {}".format(pytorch_total_params))
    return model, state
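Given the state dict assembled above, the model can later be rebuilt from the stored pieces; a sketch based only on the keys this function saves:

# Rebuild the CNN from a state produced by get_model.
model = CNN(*state['model']['args'], **state['model']['kwargs'])
model.load_state_dict(state['model']['state_dict'])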
Example #18
def run_cls(config_dir):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    # ------------------------------------------------------------------------------------------------------------
    # 1. classification inference
    # ------------------------------------------------------------------------------------------------------------
    config_root = Path(config_dir) / 'cls'
    config_paths = [config_root / p for p in os.listdir(config_root)]
    base_config_paths = [
        Path(config_dir) / p for p in os.listdir(config_dir) if 'yml' in p
    ]
    config = load_config(base_config_paths[0])

    models = []
    for c in config_paths:
        models.append(load_model(c))

    model = MultiClsModels(models)

    testloader = make_loader(
        data_folder=config.data.test_dir,
        df_path=config.data.sample_submission_path,
        phase='test',
        batch_size=config.test.batch_size,
        num_workers=config.num_workers,
        transforms=get_transforms(config.transforms.test),
        num_classes=config.data.num_classes,
    )

    all_fnames = []
    all_predictions = []
    with torch.no_grad():
        for i, (batch_fnames, batch_images) in enumerate(tqdm(testloader)):
            batch_images = batch_images.to(config.device)
            batch_preds = predict_batch(model,
                                        batch_images,
                                        tta=config.test.tta,
                                        task='cls')

            all_fnames.extend(batch_fnames)
            all_predictions.append(batch_preds)

    all_predictions = np.concatenate(all_predictions)

    np.save('all_preds', all_predictions)
    df = pd.DataFrame(data=all_predictions, index=all_fnames)

    df.to_csv('cls_preds.csv')
    df.to_csv(KAGGLE_WORK_DIR + '/cls_preds.csv')
Example #19
def test(data_loader, model, ckp_path, args):

    with torch.no_grad():

        # load_model expects an optimizer argument; this Adagrad instance only satisfies that signature
        optimizer = torch.optim.Adagrad(model.parameters())
        model = load_model(model, optimizer, ckp_path)
        model.eval()

        outputs = []
        idx_to_name = ['normal', 'smoking', 'calling']

        for i, data in tqdm(enumerate(data_loader), total=len(data_loader)):

            image = data["image"].cuda()
            name = data["name"]

            if args.model == "wsdan":
                y_pred_raw, _, attention_map = model(image)
                with torch.no_grad():
                    crop_images = batch_augment(image,
                                                attention_map[:, :1, :, :],
                                                mode='crop',
                                                theta=(0.4, 0.6),
                                                padding_ratio=0.1)
                y_pred_crop, _, _ = model(crop_images)
                output = (y_pred_raw + y_pred_crop) / 2

            else:
                output = model(image)
            output = softmax(output, dim=-1)

            for j in range(image.size(0)):

                idx = torch.argmax(output[j, :])
                category = idx_to_name[idx]
                score = output[j, idx]

                outputs.append({
                    "category": category,
                    "image_name": name[j],
                    "score": round(float(score), 5)
                })

    outputs.sort(key=lambda x: int(x['image_name'].split('.')[0]))
    with open("./log/result.json", "w+") as f:
        json.dump(outputs, f, indent=4)

    print("Done.")
    return 0
Example #20
def load_mlp_regressor(model_params, ckpt_path):
    """Load a pre-trained MLP regression model.

    Args:
        model_params (dict): dictionary of parameters defining the model, for
            instance as saved by train_regressor_ood.py in a params.json file
        ckpt_path (str): path where the checkpoint is saved.
    Returns:
        model (regressor.MLP): a pre-trained MLP regression model.
    """
    cnn_args = {'dense_sizes':
                [int(x) for x in model_params['dense_sizes'].split(',')],
                'activation': model_params['activation']}
    model = utils.load_model(ckpt_path, regressor.MLP, cnn_args)
    return model
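A hedged usage sketch, assuming `params.json` was written by train_regressor_ood.py as the docstring describes (both paths are illustrative):

import json

with open('params.json') as f:
    model_params = json.load(f)
model = load_mlp_regressor(model_params, 'ckpts/mlp')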
Example #21
def main(config):
    data = DataGenerator(config)
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=config.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            print "loading model ..."
            load_model(config.lfw.valid_model_path)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0") 
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]
            
            nrof_images = len(data.input)
            print('Calculating features for images')
            nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / config.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * config.batch_size
                end_index = min((i + 1) * config.batch_size, nrof_images)
                paths_batch = data.input[start_index:end_index]
                images = load_data(paths_batch, config.lfw.image_size, True) # return 4-d tensor
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
                
            classifier_filename_exp = os.path.expanduser(config.classifier_path)
            print('Training classifier')
            model = SVC(kernel='linear', probability=True)
            model.fit(emb_array, data.y)
            
            print "the compute is -->:", compute_rate(model,config,embeddings,images_placeholder,phase_train_placeholder,embedding_size,sess)
            
            class_names = [cls.name.replace('_', ' ') for cls in data.dataset]
            with open(classifier_filename_exp, 'wb') as outfile:
                pickle.dump((model, class_names), outfile)
            print('Saved classifier model to file "%s"' % classifier_filename_exp)
Example #22
def bilstm_train_and_eval(train_data,
                          dev_data,
                          test_data,
                          word2id,
                          tag2id,
                          crf=True,
                          remove_O=False,
                          reload_model=False):
    # data
    train_word_lists, train_tag_lists = train_data
    dev_word_lists, dev_tag_lists = dev_data
    test_word_lists, test_tag_lists = test_data

    # training
    start = time.time()
    vocab_size = len(word2id)
    out_size = len(tag2id)

    # get model_file
    if crf:
        model_name = "bilstm_crf"
    else:
        model_name = "bilstm"
    emb_size = LSTMConfig.emb_size
    hidden_size = LSTMConfig.hidden_size
    model_file = "./weights/" + model_name + '_' + str(emb_size) + '_' + str(
        hidden_size) + ".pkl"

    if reload_model:
        # reload trained model!
        bilstm_model = load_model(model_file)
    else:
        # train and save model!
        bilstm_model = BILSTM_Model(vocab_size, out_size, crf=crf)
        bilstm_model.train(train_word_lists, train_tag_lists, dev_word_lists,
                           dev_tag_lists, word2id, tag2id)
        save_model(
            bilstm_model, model_file
        )  # TODO: consider saving after each validation epoch rather than only after training
    print("Training finished, taken {} seconds!".format(
        int(time.time() - start)))
    print("Evaluating {} model:".format(model_name))
    pred_tag_lists, test_tag_lists = bilstm_model.test(test_word_lists,
                                                       test_tag_lists, word2id,
                                                       tag2id)
    results_print(test_tag_lists, pred_tag_lists, remove_O=remove_O)

    return pred_tag_lists
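An illustrative call, assuming the data tuples are (word_lists, tag_lists) pairs as unpacked above; the variables come from the project's data pipeline:

pred_tags = bilstm_train_and_eval(
    (train_words, train_tags),
    (dev_words, dev_tags),
    (test_words, test_tags),
    word2id, tag2id,
    crf=True,
    reload_model=False)  # True reloads ./weights/bilstm_crf_<emb>_<hidden>.pkl instead of training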
Example #23
def main(**kwargs):
    options._parse(kwargs)
    opt = options
    torch.manual_seed(317)

    print('Setting up data...')

    transforms = T.Compose([T.ToTensor()])
    dataset = MotDataset(opt, (640, 480), augment=True, transforms=transforms)
    opt = opt.update_dataset_info_and_set_heads(opt, dataset)
    print(opt)


    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus
    opt.device = torch.device('cuda' if opt.gpus >= '0' else 'cpu')  # opt.gpus is a string of device ids, e.g. '0,1'

    print('Creating model...')
    model = create_model('dla_34', opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0

    # Get dataloader

    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    print('Starting training...')
    trainer = BaseTrainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, -1, opt.device)

    if opt.multi_load_model != '':
        model, optimizer = load_model(model, opt.multi_load_model, trainer.optimizer)

    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        
        log_dict_train, _ = trainer.train(epoch, train_loader)
        if epoch % opt.save_every == 0:
            lr = opt.lr * 0.5
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), epoch, model, optimizer)
Example #24
    def make_model(self):
        self.model_params = {
            'learning_rate': 1e-5,
            'kernel_size': 3,
            'scale': 2,
            'c_dim': 3,
            'G0': 25,
            'G': 50,
            'D': 5,
            'C': 3,
        }

        self.model = load_model(self.model_params,
                                add_vgg=False,
                                model_name='rdn',
                                verbose=False)
        self.rdn = self.model.rdn
Example #25
def get_backbone_layers(weights_path,
                        nin_name='model_1',
                        input_layer_name='input_1',
                        output_layer_name='activation_14'):
    backbone = utils.load_model(weights_path, compile=False)
    print(backbone.summary())
    if nin_name is None:
        backbone_in = backbone.get_layer(input_layer_name).input
        backbone_out = backbone.get_layer(output_layer_name).output
    else:
        print(backbone.get_layer(nin_name).summary())
        backbone_in = backbone.get_layer(nin_name).get_layer(
            input_layer_name).input
        backbone_out = backbone.get_layer(nin_name).get_layer(
            output_layer_name).output
    return backbone_in, backbone_out
Example #26
    def get_model_and_loader(config_paths):
        config = load_config(config_paths[0])

        models = []
        for c in config_paths:
            models.append(load_model(c))

        model = MultiSegModels(models)

        testloader = make_loader(
            data_folder=config.data.test_dir,
            df_path=config.data.sample_submission_path,
            phase='test',
            img_size=(config.data.height, config.data.width),
            batch_size=config.test.batch_size,
            num_workers=config.num_workers,
            transforms=get_transforms(config.transforms.test))
        return model, testloader
Example #27
def main():
    params = get_params()
    set_random_seed(params.RANDOM_SEED)
    parse_data()
    data = DatasetNorm('cutted_data')
    train_set, test_set = torch.utils.data.random_split(
        data, [data.__len__() - 100, 100])
    trainloader = DataLoader(dataset=train_set,
                             batch_size=params.BATCH_SIZE,
                             shuffle=True,
                             num_workers=8)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    tcnn = TempoCNN().to(device)

    wandb.init(project="tcnn")
    config = wandb.config
    config.learning_rate = 0.001
    wandb.watch(tcnn)

    if not params.LOAD_MODEL:
        model = train(tcnn, trainloader)
        save_model(model)
    else:
        model = load_model().to(device)

    testloader = DataLoader(dataset=test_set,
                            batch_size=params.BATCH_SIZE,
                            shuffle=True)

    model.eval()  # put the network under test into inference mode
    iters = 0
    loss = 0.0
    cr_loss = nn.BCELoss()
    for i, data in enumerate(testloader, 0):
        mels, labels = data[0].to(device), data[1].to(device)
        pred = model(mels.unsqueeze(-1).permute(0, 3, 1, 2)).to('cpu').detach()
        res = accuracy(pred, labels)
        print(res)

        loss += cr_loss(pred.float(), labels.float().to('cpu').detach()).item()
        iters += 1

    print(loss / iters)
Example #28
def initial_or_load(checkpoint_path_load, model, optimizer, dir):
    if os.path.exists(checkpoint_path_load):
        model_loaded_str = "******model is loaded*********"
        print(model_loaded_str)
        with open(dir + 'whole_log.txt', 'a') as f:
            print(model_loaded_str, file=f)
        checkpoint = load_model(checkpoint_path_load, model, optimizer)
        begin_epoch = checkpoint['epoch']
        best_loss = checkpoint['best_loss']
        e = checkpoint['e']
    else:
        torch.manual_seed(args.seed)
        if args.device == 'cuda':
            torch.cuda.manual_seed(args.seed)
        random.seed(args.seed)
        begin_epoch = 1
        best_loss = math.inf
        e = 0
    return begin_epoch, best_loss, e
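`load_model` here must return a checkpoint dict carrying at least 'epoch', 'best_loss' and 'e'. A sketch of a matching save call (the save helper itself is an assumption, not shown in the original):

import torch

def save_checkpoint(path, model, optimizer, epoch, best_loss, e):
    # Persist everything initial_or_load reads back from the checkpoint.
    torch.save({
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch,
        'best_loss': best_loss,
        'e': e,  # early-stopping counter
    }, path)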
Example #29
def _test(parameters):
    model_folder = setup_log(parameters, parameters['save_pred'] + '_test')

    print('\nLoading mappings ...')
    train_loader = load_mappings(parameters['remodelfile'])

    print('\nLoading testing data ...')
    test_loader = DataLoader(parameters['test_data'], parameters, train_loader)
    test_loader(parameters=parameters)
    test_data, prune_recall = DocRelationDataset(test_loader, 'test',
                                                 parameters,
                                                 train_loader).__call__()

    m = Trainer(train_loader, parameters, {
        'train': [],
        'test': test_data
    }, model_folder, prune_recall)
    trainer = load_model(parameters['remodelfile'], m)
    trainer.eval_epoch(final=True, save_predictions=True)
Example #30
def load_cnn_classifier(model_params, ckpt_path):
    """Load a pre-trained CNN classifier.

    Args:
        model_params (dict): dictionary of parameters defining the CNN, for
            instance as saved by train_classifier_ood.py in a params.json file
        ckpt_path (str): path where the checkpoint is saved.
    Returns:
        model (classifier.CNN): a pre-trained CNN classifier.
    """
    cnn_args = {'conv_dims':
                    [int(x) for x in model_params['conv_dims'].split(',')],
                'conv_sizes':
                    [int(x) for x in model_params['conv_sizes'].split(',')],
                'dense_sizes':
                    [int(x) for x in model_params['dense_sizes'].split(',')],
                'n_classes': model_params['n_classes'], 'onehot': True}
    model = utils.load_model(ckpt_path, classifier.CNN, cnn_args)
    return model