Example #1
def main():
    trainset, valset, testset = cath_dataset(
        1800, jsonl_file=sys.argv[1])  # batch size = 1800 residues
    optimizer = tf.keras.optimizers.Adam()
    model = make_model()

    model_id = int(datetime.timestamp(datetime.now()))

    NUM_EPOCHS = 100
    loop_func = util.loop
    best_epoch, best_val = 0, np.inf

    for epoch in range(NUM_EPOCHS):
        loss, acc, confusion = loop_func(trainset,
                                         model,
                                         train=True,
                                         optimizer=optimizer)
        util.save_checkpoint(model, optimizer, model_id, epoch)
        print('EPOCH {} TRAIN {:.4f} {:.4f}'.format(epoch, loss, acc))
        util.save_confusion(confusion)
        loss, acc, confusion = loop_func(valset, model, train=False)
        if loss < best_val:
            best_epoch, best_val = epoch, loss
        print('EPOCH {} VAL {:.4f} {:.4f}'.format(epoch, loss, acc))
        util.save_confusion(confusion)

    # Test with best validation loss
    path = util.models_dir.format(str(model_id).zfill(3),
                                  str(best_epoch).zfill(3))
    util.load_checkpoint(model, optimizer, path)
    loss, acc, confusion = loop_func(testset, model, train=False)
    print('EPOCH TEST {:.4f} {:.4f}'.format(loss, acc))
    util.save_confusion(confusion)
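
A minimal sketch of the util.save_checkpoint / util.load_checkpoint helpers that Examples #1 and #2 call but do not show, assuming a tf.train.Checkpoint wrapper and the models_dir path template used above (both hypothetical, inferred from the call sites):

import tensorflow as tf

# assumed path template: first slot is the model id, second the epoch
models_dir = 'models/{}/{}'

def save_checkpoint(model, optimizer, model_id, epoch):
    # bundle model and optimizer state into a single checkpoint file
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
    ckpt.write(models_dir.format(str(model_id).zfill(3), str(epoch).zfill(3)))

def load_checkpoint(model, optimizer, path):
    # restore in place; expect_partial() silences warnings about optimizer
    # slot variables that are only created lazily
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
    ckpt.read(path).expect_partial()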
Example #2
def main():
    trainset, valset, testset = rocklin_dataset(32)  # batch size = 32
    optimizer = tf.keras.optimizers.Adam()
    model = make_model()

    model_id = int(datetime.timestamp(datetime.now()))

    NUM_EPOCHS = 50
    loop_func = loop
    best_epoch, best_val = 0, np.inf

    for epoch in range(NUM_EPOCHS):
        loss = loop_func(trainset, model, train=True, optimizer=optimizer)
        save_checkpoint(model, optimizer, model_id, epoch)
        print('EPOCH {} TRAIN {:.4f}'.format(epoch, loss))
        #util.save_confusion(confusion)
        loss = loop_func(valset, model, train=False, val=False)
        if loss < best_val:
            #Could play with this parameter here. Instead of saving best NN based on loss
            #we could save it based on precision/auc/recall/etc.
            best_epoch, best_val = epoch, loss
        print('EPOCH {} VAL {:.4f}'.format(epoch, loss))
        #util.save_confusion(confusion)

    # Test with best validation loss
    path = models_dir.format(str(model_id).zfill(3), str(best_epoch).zfill(3))
    load_checkpoint(model, optimizer, path)
    loss, tp, fp, tn, fn, acc, prec, recall, auc, y_pred, y_true = loop_func(
        testset, model, train=False, val=True)
    print('EPOCH TEST {:.4f} {:.4f}'.format(loss, acc))
    #util.save_confusion(confusion)
    return loss, tp, fp, tn, fn, acc, prec, recall, auc, y_pred, y_true
Example #3
    def test(self):
        load_checkpoint(self.netG_A2B,
                        './results/cyclegan/checkpoints/netG_A2B_final.pth',
                        self.device)
        load_checkpoint(self.netG_B2A,
                        './results/cyclegan/checkpoints/netG_B2A_final.pth',
                        self.device)

        os.makedirs('./results/cyclegan/evaluation/', exist_ok=True)

        self.netG_A2B.eval()
        self.netG_B2A.eval()

        print("=====Test Start======")

        with torch.no_grad():
            for iter, (real_A, real_B) in enumerate(self.testloader):
                fake_A = self.netG_B2A(real_B.to(self.device))
                save_image(
                    denorm(fake_A),
                    os.path.join('./results/cyclegan/evaluation',
                                 'fake_image-{:05d}.png'.format(iter + 1)))

        # Compute the Inception score
        dataset = FolderDataset(
            folder=os.path.join('./results/cyclegan/evaluation'))
        Inception = Inception_Score(dataset)
        score = Inception.compute_score(splits=1)

        print('Inception Score : ', score)
Example #4
    def test(self):
        load_checkpoint(
            self.netG,
            os.path.join('./results', self.type, 'checkpoints',
                         'netG_final.pth'), self.device)
        self.netG.eval()

        os.makedirs(os.path.join('./results/', self.type, 'evaluation'),
                    exist_ok=True)

        print("=====Test Start======")

        with torch.no_grad():
            for iter in range(1000):
                z = torch.randn(1, 256).to(self.device)
                fake_img = self.netG(z)
                save_image(
                    denorm(fake_img),
                    os.path.join('./results/', self.type, 'evaluation',
                                 'fake_image-{:05d}.png'.format(iter + 1)))

        # Compute the Inception score
        dataset = FolderDataset(
            folder=os.path.join('./results/', self.type, 'evaluation'))
        Inception = Inception_Score(dataset)
        score = Inception.compute_score(splits=1)

        print('Inception Score : ', score)
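
Both GAN tests above pass the generator output through a denorm helper before save_image. A plausible one-liner, assuming the networks produce images normalized to [-1, 1] (an assumption; the helper is not shown in these snippets):

def denorm(x):
    # map generator output from [-1, 1] back to [0, 1] for save_image
    return (x * 0.5 + 0.5).clamp(0, 1)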
Example #5
def main():
    print('this is alpha {}'.format(args.alpha))
    os.makedirs(f'{args.save_dir}', exist_ok=True)
    os.makedirs(f'{args.save_dir}/{args.out_oldweight_folder}', exist_ok=True)
    os.makedirs(f'{args.save_dir}/{args.out_pruned_folder}', exist_ok=True)
    os.makedirs(f'{args.save_dir}/{args.out_pruned_re_folder}', exist_ok=True)
    os.makedirs(f'{args.save_dir}/{args.out_quantized_folder}', exist_ok=True)
    os.makedirs(f'{args.save_dir}/{args.out_quantized_re_folder}',
                exist_ok=True)
    util.log(f"{args.save_dir}/{args.log}",
             "--------------------------configure----------------------")
    util.log(f"{args.save_dir}/{args.log}", f"{args}\n")
    if args.train_mode == 1:
        # Define model
        model = vgg_mask.VGG16('VGG16', args.partition, mask_flag=True).cuda()
        model = initial_process(model)
    elif args.train_mode == 2:
        if os.path.isfile(f"{args.load_model}"):
            print("-------load " + f"{args.load_model} ----")
            model = vgg_mask.VGG16('VGG16', args.partition,
                                   mask_flag=True).cuda()
            model = util.load_checkpoint(model, f"{args.load_model}", args)
            model = pruning_process(model)
        else:
            print("---not found " + f"{args.load_model} ----")
    elif args.train_mode == 3:
        if os.path.isfile(f"{args.load_model}"):
            print("-------load " + f"{args.load_model} ----")
            model = vgg_mask.VGG16('VGG16', args.partition,
                                   mask_flag=True).cuda()
            model = util.load_checkpoint(model, f"{args.load_model}", args)
            model = quantize_process(model)
        else:
            print("---not found " + f"{args.load_model} ----")
    elif args.train_mode == 4:  # initial train / prune CNN / quantize
        model = vgg_mask.VGG16('VGG16', args.partition, mask_flag=True).cuda()
        model = initial_process(model)
        model = pruning_process(model)
        quantize_process(model)
    elif args.train_mode == 5:  # initial train / quantize
        model = vgg_mask.VGG16('VGG16', args.partition, mask_flag=True).cuda()
        st = time.time()
        model = initial_process(model)
        print('time init:', time.time() - st)
        model = quantize_process(model)
        print('time init+quantize:', time.time() - st)
    elif args.train_mode == 6:  # load base model, prune and quantize
        if os.path.isfile(f"{args.load_model}"):
            print("-------load " + f"{args.load_model} ----")
            model = vgg_mask.VGG16('VGG16', args.partition,
                                   mask_flag=True).cuda()
            model = util.load_checkpoint(model, f"{args.load_model}", args)
            model = pruning_process(model)
            model = quantize_process(model)
        else:
            print("---not found " + f"{args.load_model} ----")
Example #6
def main():
    args = parse_args('test')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform_test = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    test_set = DataLoader(args.test_list, transform=transform_test)

    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.num_workers)

    # Model
    print("==> creating model")
    num_classes = args.num_classes
    model = create_model(num_classes, args).to(device)

    criterion = nn.CrossEntropyLoss()

    # testing
    expname = args.expname.split('/')[-1]
    print('\n' + expname + ': TESTING!')
    train_set = os.path.basename(args.train_list).split('.')[0]
    val_set = os.path.basename(args.val_list).split('.')[0]
    # only the names are reused as labels; test_loader above already holds the data
    test_set = os.path.basename(args.test_list).split('.')[0]

    if args.average:
        record = pd.read_csv(os.path.join(args.output_csv_dir, 'output.csv'), index_col=0)
        sorted_r = record.sort_values('f1', ascending=False)

        model_list = list(sorted_r['epoch_num'].astype(int))[:5]
        df = pd.DataFrame(columns=['exp', 'train', 'val', 'test', 'test_loss', 'test_acc', 'f1'])
        for idx, epoch in enumerate(model_list):
            model = load_checkpoint(model, os.path.join(args.save_model_dir, f'epoch{epoch}_checkpoint.pth.tar'))
            test_loss, test_acc, f1, _ = test(test_loader, model, criterion, device, args, epoch=idx+1)

            df.loc[len(df)] = [expname, train_set, val_set, test_set, test_loss, test_acc, f1]

        output_csv_file = os.path.join(args.output_csv_dir, 'test_acc.csv')
        df.to_csv(output_csv_file, index=False)
    else:
        df = pd.DataFrame(columns=['exp', 'train', 'val', 'test', 'test_loss', 'test_acc', 'f1'])
        model = load_checkpoint(model, os.path.join(args.save_model_dir, 'model_best_f1.pth.tar'))
        test_loss, test_acc, f1, _ = test(test_loader, model, criterion, device, args)

        df.loc[len(df)] = [expname, train_set, val_set, test_set, test_loss, test_acc, f1]
        output_csv_file = os.path.join(args.output_csv_dir, 'test_f1.csv')
        df.to_csv(output_csv_file, index=False)
Example #7
    def initialize(self, ctx):
        self.manifest = ctx.manifest

        # set config, path
        properties = ctx.system_properties
        model_dir = properties.get('model_dir')
        logger.info("model_dir: %s", model_dir)
        opt = Opt(model_dir)
        with open(opt.config, 'r', encoding='utf-8') as f:
            self.config = json.load(f)
        logger.info("%s", self.config)
        if opt.num_threads > 0: torch.set_num_threads(opt.num_threads)
        self.config['opt'] = opt
        logger.info("opt.device: %s", opt.device)
        logger.info("opt.num_threads: %s", opt.num_threads)
        logger.info("opt.enable_dqm: %s", opt.enable_dqm)

        # load pytorch model checkpoint
        checkpoint = load_checkpoint(opt.model_path, device=opt.device)

        # prepare model and load parameters
        self.model = self.load_model(checkpoint)
        self.model.eval()

        # enable to use dynamic quantized model (pytorch>=1.3.0)
        if opt.enable_dqm and opt.device == 'cpu':
            self.model = torch.quantization.quantize_dynamic(self.model,
                                                             {torch.nn.Linear},
                                                             dtype=torch.qint8)
            print(self.model)

        # prepare tokenizer
        self.tokenizer = self.prepare_tokenizer()

        self.initialized = True
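
Note on the quantization step: torch.quantization.quantize_dynamic swaps the listed module types (here torch.nn.Linear) for dynamically quantized equivalents, and dynamic quantization is a CPU-only optimization in PyTorch, which is why the handler gates it on opt.device == 'cpu'.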
Example #8
def main():
    args = parse_args('predict')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    test_set = DataLoader(args.ext_test, transform=transform)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False)

    print("==> creating model")
    num_classes = args.num_classes
    model = create_model(num_classes, args).to(device)

    if args.average:
        record = pd.read_csv(os.path.join(args.output_csv_dir, 'output.csv'), index_col=0)
        sorted_r = record.sort_values('f1', ascending=False)

        model_list = list(sorted_r['epoch_num'].astype(int))[:5]
        for idx, epoch in enumerate(model_list):
            print('\nPredict with ' f'epoch{epoch} checkpoint')
            model = load_checkpoint(model, os.path.join(args.save_model_dir, f'epoch{epoch}_checkpoint.pth.tar')).to(device)
            predict(test_loader, model, device, args, idx)
    else:
        predict(test_loader, model, device, args)
Example #9
def main(rootpath=ROOT_PATH):
    args = parse_args()


    if args.arch == 'resnet101':
        model = models.resnet101(pretrained=False)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, len(class_names))
    elif args.arch == 'resnet152':
        model = models.resnet152(pretrained=False)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, len(class_names))
    elif args.arch == 'densenet121':
        model = models.densenet121(pretrained=False)
        num_ftrs = model.classifier.in_features
        model.classifier = nn.Linear(num_ftrs, len(class_names))
    elif args.arch == 'inception_v3':
        model = models.inception_v3(pretrained=False)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, len(class_names))

    checkpoint = load_checkpoint(resume)

    start_epoch = checkpoint['epoch']
    best_acc = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])

    #optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}' (epoch {}, val acc {})".format(resume, checkpoint['epoch'], best_acc))
        
    if use_gpu:
        model = model.cuda()
    model.train(False)  

    test(model, res_dir)
Example #10
def test_allkd(s_name, params):
    teachers = ["resnet8", "resnet14", "resnet20", "resnet26",
                "resnet32", "resnet44", "resnet56",
                # "resnet34", "resnet50", "resnet101", "resnet152",
                ]
    accs = {}
    for t_name in teachers:
        params_t = params.copy()
        params_t["teacher_name"] = t_name
        t_net, best_teacher, best_t_acc = setup_teacher(t_name, params_t)
        t_net = util.load_checkpoint(t_net, best_teacher, params_t["device"])
        t_net = freeze_teacher(t_net)
        s_net = setup_student(s_name, params_t)
        params_t["test_name"] = f"{s_name}_{t_name}"
        params_t["results_dir"] = params_t["results_dir"].joinpath("allkd")
        util.check_dir(params_t["results_dir"])
        best_acc = test_kd(s_net, t_net, params_t)
        accs[t_name] = (best_t_acc, best_acc)

    best_acc = 0
    best_t_acc = 0
    for t_name, acc in accs.items():
        if acc[0] > best_t_acc:
            best_t_acc = acc[0]
        if acc[1] > best_acc:
            best_acc = acc[1]
        print(f"Best results teacher {t_name}: {acc[0]}")
        print(f"Best results for {s_name}: {acc[1]}")

    return best_t_acc, best_acc
Example #11
def load_gen_inf_mem(algorithm, num_particles, seed):
    checkpoint_path = "checkpoints/checkpoint_{}_{}_{}.pt".format(
        algorithm, num_particles, seed)

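    # the training script is assumed to have saved exactly this tuple;
    # everything except the two models and the memory is discarded here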
    (
        generative_model,
        inference_network,
        theta_losses,
        phi_losses,
        cluster_cov_distances,
        test_log_ps,
        test_log_ps_true,
        test_kl_qps,
        test_kl_pqs,
        test_kl_qps_true,
        test_kl_pqs_true,
        train_log_ps,
        train_log_ps_true,
        train_kl_qps,
        train_kl_pqs,
        train_kl_qps_true,
        train_kl_pqs_true,
        train_kl_memory_ps,
        train_kl_memory_ps_true,
        memory,
        _,
        _,
    ) = util.load_checkpoint(checkpoint_path, torch.device("cpu"))

    return generative_model, inference_network, memory
Example #12
def main():
    """
    Load data and train a model on it.
    """
    args = argument_parser().parse_args()
    print(args)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.set_device(0)

    train_set, val_set, test_set = read_dataset(DATA_DIR)

    if not args.pretrained:
        print('Training...')
        train(train_set, test_set, args.checkpoint, **train_kwargs(args))
    else:
        print('Restoring from checkpoint...')
        model_state, op_state, meta_iteration, cur_meta_step_size, accuracy_tracking = load_checkpoint(
            args.checkpoint)
        train(train_set, test_set, args.checkpoint, model_state, op_state,
              **train_kwargs(args))

    print('\nEvaluating...')
    model_state, op_state, meta_iteration, cur_meta_step_size, accuracy_tracking = load_checkpoint(
        args.checkpoint)
    do_evaluation(model_state, op_state, args.checkpoint, val_set, test_set,
                  train_set)
Example #13
def setup_valid(model, batcher, config):
    valid_directory = config['valid_folder']

    if not os.path.exists(valid_directory):
        os.makedirs(valid_directory)
    saver = tf.train.Saver(max_to_keep=3)
    sess = tf.Session(config=util.get_config())
    #sess = tf.Session()
    _ = util.load_checkpoint(
        saver=saver,
        sess=sess,
        log_directory=config['log_folder'],
        checkpoint_directory='train')  ### valid or train ???
    best_valid_loss = None

    while True:
        valid_loss = 0
        for i in range(config['valid_iter']):
            batch = batcher.next_batch()
            to_return = model.model_valid(batch, sess)
            valid_loss += to_return['loss']

        # save a checkpoint whenever the validation loss improves
        if best_valid_loss is None or valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            saver.save(sess,
                       config['bestmodel_save_path'],
                       latest_filename='checkpoint_best')
            print('save checkpoint')
        else:
            print('checkpoint not updated')
Example #14
def run_benchmarks(modes, params, s_name, t_name):
    results = {}

    # if we test allkd we do not need to train an individual teacher
    if "allkd" in modes:
        best_t_acc, results["allkd"] = test_allkd(s_name, params)
        modes.remove("allkd")
    else:
        t_net, best_teacher, best_t_acc = setup_teacher(t_name, params)

    for mode in modes:
        mode = mode.lower()
        params_s = params.copy()
        # reset the teacher
        t_net = util.load_checkpoint(t_net, best_teacher, params["device"])

        # load the student and create a results directory for the mode
        s_net = setup_student(s_name, params)
        params_s["test_name"] = s_name
        params_s["results_dir"] = params_s["results_dir"].joinpath(mode)
        util.check_dir(params_s["results_dir"])
        # start the test
        try:
            run_test = globals()[f"test_{mode}"]
            results[mode] = run_test(s_net, t_net, params_s)
        except KeyError:
            raise RuntimeError(f"Training mode {mode} not supported!")

    # Dump the overall results
    print(f"Best results teacher {t_name}: {best_t_acc}")
    for name, acc in results.items():
        print(f"Best results for {s_name} with {name} method: {acc}")
Example #15
def get_model(word_to_idx, label_to_idx, resume=False, use_glove=True):
    """Resume a saved model or build a new model"""

    best_acc = 0  # best test accuracy
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    if resume:
        # load checkpoint
        checkpoint = load_checkpoint()
        model = checkpoint['model']
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
    else:
        print('==> Building model {}...'.format(cfg.RUN_MODE))
        if cfg.RUN_MODE in ["RNN", "LSTM", "GRU"]:
            model = BatchRNN(cfg.EMBEDDING_DIM,
                             cfg.HIDDEN_DIM,
                             cfg.BATCH_SIZE,
                             len(word_to_idx),
                             len(label_to_idx),
                             rnn_model=cfg.RUN_MODE)
        else:
            model = CNN_NLP(cfg.EMBEDDING_DIM, cfg.HIDDEN_DIM, cfg.BATCH_SIZE,
                            len(word_to_idx), len(label_to_idx))
        if use_glove:
            # model.load_glove_model('GloVe-1.2/vectors.txt', word_to_idx)
            model.load_glove_model(cfg.GLOVE_FILE,
                                   word_to_idx,
                                   regenerate=True)
    return model, best_acc, start_epoch
Example #16
def load(algorithm, num_particles):
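    # one accumulator per tracked series (losses, KL terms, log-probs, ...)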
    things = [[] for _ in range(19)]
    for seed in range(1, 11):
        checkpoint_path = 'checkpoints/checkpoint_{}_{}_{}.pt'.format(
            algorithm, seed, num_particles)
        checkpoint = util.load_checkpoint(checkpoint_path, torch.device('cpu'))
        for i, x in enumerate(checkpoint[2:-3] + checkpoint[-2:]):
            if i >= 17 and (algorithm == 'rws' or algorithm == 'vimco'):
                things[i].append(moving_average(x, 10))
            else:
                things[i].append(x)

    # cut to min length
    for i in range(len(things)):
        if i < 2:
            things[i] = [moving_average(x) for x in things[i]]
        if things[i][0] is not None:
            min_length = min(map(len, things[i]))
            things[i] = [x[:min_length] for x in things[i]]

    mid, lower, upper = [], [], []
    for i, thing in enumerate(things):
        if thing[0] is None:
            mid.append(None)
            lower.append(None)
            upper.append(None)
        else:
            mid.append(np.quantile(np.array(thing), 0.5, axis=0))
            lower.append(np.quantile(np.array(thing), 0.25, axis=0))
            upper.append(np.quantile(np.array(thing), 0.75, axis=0))

    # mid = [np.quantile(np.array(thing), 0.5, axis=0) for thing in things]
    # lower = [np.quantile(np.array(thing), 0.25, axis=0) for thing in things]
    # upper = [np.quantile(np.array(thing), 0.75, axis=0) for thing in things]
    return mid, lower, upper
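
The moving_average smoother used above is also external. A hypothetical version consistent with both call forms, moving_average(x) and moving_average(x, 10) (the default window size is a guess):

import numpy as np

def moving_average(x, window=100):
    # trailing-window mean; shortens the series by window - 1 samples
    if x is None:
        return None
    kernel = np.ones(window) / window
    return np.convolve(np.asarray(x, dtype=float), kernel, mode='valid')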
Example #17
def setup_teacher(t_name, params):
    # Teacher Model
    num_classes = params["num_classes"]
    t_net = create_model(t_name, num_classes, params["device"])
    teacher_config = params.copy()
    teacher_config["test_name"] = t_name + "_teacher"

    if params["t_checkpoint"]:
        # Just validate the performance
        print("---------- Loading Teacher -------")
        best_teacher = params["t_checkpoint"]
    else:
        # Teacher training
        print("---------- Training Teacher -------")
        teacher_trainer = BaseTrainer(t_net, config=teacher_config)
        teacher_trainer.train()
        best_teacher = teacher_trainer.best_model_file

    # reload and get the best model
    t_net = util.load_checkpoint(t_net, best_teacher)
    teacher_trainer = BaseTrainer(t_net, config=teacher_config)
    best_t_acc = teacher_trainer.validate()

    # also save this information in a csv file for plotting
    name = teacher_config["test_name"] + "_val"
    acc_file_name = params["results_dir"].joinpath(f"{name}.csv")
    with acc_file_name.open("w+") as acc_file:
        acc_file.write("Training Loss,Validation Loss\n")
        for _ in range(params["epochs"]):
            acc_file.write(f"0.0,{best_t_acc}\n")
    return t_net, best_teacher, best_t_acc
Example #18
def build_gen_dis(config):
    net_gen = generator(config.DIM_NOISE, config.DIM_IMG,
                        config.N_CHANNEL).to(config.DEVICE)
    net_dis = discriminator(config.DIM_IMG, config.N_CHANNEL).to(config.DEVICE)

    if config.INIT:
        net_gen.apply(init_weight)
        net_dis.apply(init_weight)
        loss = None
        print('model initialized successfully')
    else:
        ext = config.PATH_MODEL[-4:]
        path_model = config.PATH_IMPORT_MODEL[:-4] + '_epoch_%d' % config.IMPORT_IDX_EPOCH
        print(path_model)
        path_model = path_model + ext
        print(path_model)
        if config.DEVICE == torch.device("cpu"):
            device = 'cpu'
        else:
            device = 'cuda:0'
        net_gen, net_dis, loss = util.load_checkpoint(config.EPOCHS, net_gen,
                                                      net_dis, path_model,
                                                      device)
        print("load model successed.")

    return net_gen, net_dis, loss
Example #19
  def decode(self):
    """Decode examples until data is exhausted (if FLAGS.single_pass) and return, or decode indefinitely, loading latest checkpoint at regular intervals"""
    t0 = time.time()
    counter = 0
    step = 0
    while True:
      step += 1
      batch = self._batcher.next_batch()  # 1 example repeated across batch
      if batch is None: # finished decoding dataset in single_pass mode
        assert self._model.single_pass, "Dataset exhausted, but we are not in single_pass mode"
        tf.logging.info("Decoder has finished reading dataset for single_pass.")
        tf.logging.info("Output has been saved in %s and %s. Now starting ROUGE eval...", self._rouge_ref_dir, self._rouge_dec_dir)
        results_dict = rouge_eval(self._rouge_ref_dir, self._rouge_dec_dir)
        rouge_log(results_dict, self._decode_dir)
        return


      t_start_decode = time.time()
      
      original_article = batch.original_articles[0]  # string
      original_abstract = batch.original_abstracts[0]  # string
      original_abstract_sents = batch.original_abstracts_sents[0]  # list of strings

      article_withunks = data.show_art_oovs(original_article, self._vocab) # string
      abstract_withunks = data.show_abs_oovs(original_abstract, self._vocab, (batch.art_oovs[0] if self._model.pointer_gen else None)) # string

      # Run beam search to get best Hypothesis
      best_hyp = self._model.model_decode(batch, self._sess, self._vocab)
      print(best_hyp.tokens)
      #best_hyp = None

      # Extract the output ids from the hypothesis and convert back to words
      output_ids = [int(t) for t in best_hyp.tokens[1:]]
      decoded_words = data.outputids2words(output_ids, self._vocab, (batch.art_oovs[0] if self._model.pointer_gen else None))

      # Remove the [STOP] token from decoded_words, if necessary
      try:
        fst_stop_idx = decoded_words.index(data.STOP_DECODING) # index of the (first) [STOP] symbol
        decoded_words = decoded_words[:fst_stop_idx]
      except ValueError:
        pass  # no [STOP] token produced; keep the full sequence
      decoded_output = ' '.join(decoded_words) # single string

      if self._model.single_pass:
        self.write_for_rouge(original_abstract_sents, decoded_words, counter) # write ref summary and decoded summary to file, to eval with pyrouge later
        counter += 1 # this is how many examples we've decoded
      else:
        print_results(article_withunks, abstract_withunks, decoded_output) # log output to screen
        self.write_for_attnvis(article_withunks, abstract_withunks, decoded_words, best_hyp.attn_dists, best_hyp.p_gens) # write info to .json file for visualization tool

        # Check if SECS_UNTIL_NEW_CKPT has elapsed; if so return so we can load a new checkpoint
        t1 = time.time()
        if t1-t0 > SECS_UNTIL_NEW_CKPT:
          print("We've been decoding with same checkpoint for %i seconds. Time to load new checkpoint" % (t1 - t0))
          _ = util.load_checkpoint(self._saver, self._sess)
          t0 = time.time()

      t_end_decode = time.time()
      print('decode {}-th batch requires {} seconds'.format(step, int(t_end_decode - t_start_decode)))
Example #20
def main():
    parser = argparse.ArgumentParser(description='Predict a file.')
    parser.add_argument('input_img',
                        type=str,
                        help='Path of the input image',
                        default="./flowers/test/2/image_05100.jpg")
    parser.add_argument('checkpoint',
                        type=str,
                        help='Path of the checkpoint',
                        default="./checkpoint.pth")
    parser.add_argument('--top_k', help='Top k', type=int, default=5)
    parser.add_argument('--gpu',
                        action='store_true',
                        help='Use GPU for inference if GPU is available')
    parser.add_argument('--category_names',
                        action="store",
                        default='cat_to_name.json')

    args, _ = parser.parse_known_args()

    input_img = args.input_img
    checkpoint = args.checkpoint

    category_names = 'cat_to_name.json'
    if args.category_names:
        category_names = args.category_names

    top_k = 5
    if args.top_k:
        top_k = args.top_k

    cuda = False
    if args.gpu:
        if torch.cuda.is_available():
            cuda = True
        else:
            print("GPU flag was set but no GPU is available in this machine.")

    loaded_model = util.load_checkpoint(checkpoint, cuda)

    with open(category_names, 'r') as json_file:
        cat_to_name = json.load(json_file)

    probabilities, classes = util.predict(input_img, loaded_model, top_k)

    labels = [cat_to_name[str(int(index) + 1)] for index in classes]
    probability = np.array(probabilities)

    i = 0
    while i < top_k:
        print("{} with a probability of {:.2f}%".format(
            labels[i], probability[i] * 100))
        i += 1

    print("Prediction Done.")
Example #21
def test_multikd(s_net, t_net1, params):
    t_net1 = freeze_teacher(t_net1)
    print("---------- Training MULTIKD -------")
    kd_config = params.copy()
    params["t2_name"] = "WRN22_4"
    t_net2 = create_model(params["t2_name"], params["num_classes"],
                          params["device"])
    t_net2 = util.load_checkpoint(t_net2, "pretrained/WRN22_4_cifar10.pth")
    t_net2 = freeze_teacher(t_net2)

    params["t3_name"] = "resnet18"
    t_net3 = create_model(params["t3_name"], params["num_classes"],
                          params["device"])
    t_net3 = util.load_checkpoint(t_net3, "pretrained/resnet18_cifar10.pth")
    t_net3 = freeze_teacher(t_net3)

    # note: t_net3 is built and frozen above but not added to the ensemble here
    t_nets = [t_net1, t_net2]
    kd_trainer = MultiTrainer(s_net, t_nets=t_nets, config=kd_config)
    best_acc = kd_trainer.train()
    return best_acc
Example #22
def get_model(args):
    if args.model_mode == 'c':
        if args.use_model == 'alex':
            args.prune_rates = [0.16, 0.62, 0.65, 0.63, 0.63]
            model = AlexNet(100, mask_flag=True).to(args.device)
        elif args.use_model == 'vgg16':
            # args.prune_rates = [0.42, 0.78, 0.66, 0.64, 0.47, 0.76, 0.58, 0.68, 0.73, 0.66, 0.65, 0.71, 0.64]  # deepC
            args.prune_rates = [0.50, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.75, 0.75, 0.75, 0.75, 0.75]  # PFEC
            model = vgg16().to(args.device)
        else:
            raise ValueError(f"unsupported model: {args.use_model}")
    else:
        model = AlexNetMask(args.partition, 100, mask_flag=True).to(args.device)
    if os.path.isfile(f"{args.load_model}"):
        model, args.best_top1_acc = util.load_checkpoint(model, f"{args.load_model}", args)
        print("-------load " + f"{args.load_model} ({args.best_top1_acc:.3f})----")
    return model
Example #23
def run_takd_distillation(s_net, ta_net, t_net, **params):

    # Teaching Assistant training
    print("---------- Training TA -------")
    ta_config = params.copy()
    ta_name = ta_config["ta_name"]
    ta_config["test_name"] = f"{ta_name}_ta_trainer"
    ta_trainer = KDTrainer(ta_net, t_net=t_net, config=ta_config)
    best_ta_acc = ta_trainer.train()
    best_ta = ta_trainer.best_model_file
    ta_net = util.load_checkpoint(ta_net, best_ta)

    # Student training
    print("---------- Training TA Student -------")
    s_trainer = KDTrainer(s_net, t_net=ta_net, config=params)
    best_s_acc = s_trainer.train()

    print(f"Best results teacher {ta_name}: {best_ta_acc}")
    return best_s_acc
Example #24
def get_error_rate_with_two_classes(training=False):
    """
    Compute the overall error rate of the trained model.

    Combine positive and negative as one class, compared to neutral.
    If training is False, use test_data, otherwise training_data.
    """
    model = load_checkpoint()['model']

    data, word_to_idx, label_to_idx = data_loader(training)

    targets = torch.LongTensor()
    predicts = torch.LongTensor()
    for sentences, _targets, seq_lengths in get_batch_data(data,
                                                           cfg.BATCH_SIZE,
                                                           word_to_idx,
                                                           shuffle=True):

        _predicts = evaluate_batch(model, sentences, seq_lengths)
        targets = torch.cat((targets, _targets), 0)
        predicts = torch.cat((predicts, _predicts), 0)

    # combine positive and negative as one class
    targets[targets == 2] = 1
    predicts[predicts == 2] = 1

    error_rate = (targets != predicts).sum() / targets.size(0)

    print(Counter(targets.numpy()), Counter(predicts.numpy()))
    print('error rate: ', error_rate)
    idx2label = {i: v for v, i in label_to_idx.items()}
    labels = [idx2label[idx] for idx in sorted(idx2label.keys())]
    print(
        'Report:\n',
        metrics.classification_report(targets.numpy(),
                                      predicts.numpy(),
                                      target_names=labels))
    print('Confusion matrix: \n',
          metrics.confusion_matrix(targets.numpy(), predicts.numpy()))

    return error_rate
Example #25
def main(args):
    # general
    if torch.cuda.is_available():
        device = torch.device("cuda")
        util.logging.info("Using CUDA")
    else:
        device = torch.device("cpu")
        util.logging.info("Using CPU")

    util.set_seed(args.seed)

    # init
    checkpoint_path = util.get_checkpoint_path(args)
    if not Path(checkpoint_path).exists():
        util.logging.info("Training from scratch")
        model, optimizer, stats = util.init(args, device)
    else:
        model, optimizer, stats, args = util.load_checkpoint(checkpoint_path, device)

    # train
    train.train(model, optimizer, stats, run_args=args)
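
Examples #25 and #30 expect util.load_checkpoint(path, device) to hand back (model, optimizer, stats, args). One plausible shape for that helper, assuming the training side pickles whole objects with torch.save (hypothetical; the real util module is not shown):

import torch

def load_checkpoint(path, device):
    # map_location moves any stored tensors onto the requested device
    ckpt = torch.load(path, map_location=device)
    return ckpt["model"], ckpt["optimizer"], ckpt["stats"], ckpt["args"]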
Example #26
def main():

    model = util.load_checkpoint(pa.load_dir)

    with open(pa.category_name, 'r') as f:
        cat_to_name = json.load(f)


    # print(cat_to_name.get)
    # print(len(cat_to_name))

    top_p_list, top_flowers = util.predict(image_path, pa.load_dir,
                                           cat_to_name, pa.top_k)
    # labels = [cat_to_name[str(index + 1)] for index in np.array(top_p_list[1][0])]
    # return print(top_p_list[0])
    i = 0
    while i < pa.top_k:
        print('{} with a probability of {}'.format(top_flowers[i],
                                                   top_p_list[i]))
        i += 1
Example #27
def get_error_rate(model=None, training=False, report=False):
    """
    Compute the overall error rate of the trained model.

    If training is False, use test_data, otherwise training_data.
    If report is True, print precision, recall, F1-score, and confusion matrix.
    """
    model = model or load_checkpoint()['model']

    data, word_to_idx, label_to_idx = data_loader(training=training)

    targets = torch.LongTensor()
    predicts = torch.LongTensor()
    for sentences, _targets, seq_lengths in get_batch_data(data,
                                                           cfg.BATCH_SIZE,
                                                           word_to_idx,
                                                           shuffle=True):

        _predicts = evaluate_batch(model, sentences, seq_lengths)
        targets = torch.cat((targets, _targets), 0)
        predicts = torch.cat((predicts, _predicts), 0)

    error_rate = (targets != predicts).sum() / targets.size(0)

    if report:
        print('targets:', Counter(targets.numpy()), 'predicts:',
              Counter(predicts.numpy()))
        print('error rate: ', error_rate)
        idx2label = {i: v for v, i in label_to_idx.items()}
        labels = [idx2label[idx] for idx in sorted(idx2label.keys())]
        print(
            'Report:\n',
            metrics.classification_report(targets.numpy(),
                                          predicts.numpy(),
                                          target_names=labels))
        print('Confusion matrix: \n',
              metrics.confusion_matrix(targets.numpy(), predicts.numpy()))

    return error_rate
Example #28
  def __init__(self, model, batcher, vocab):
    """
    Args:
      model: a Seq2SeqAttentionModel object.
      batcher: a Batcher object.
      vocab: Vocabulary object
    """
    self._model = model
    self._batcher = batcher
    self._vocab = vocab
    self._saver = tf.train.Saver() # load checkpoints for decoding
    self._sess = tf.Session(config=util.get_config())

    # Load an initial checkpoint to use for decoding
    ckpt_path = util.load_checkpoint(self._saver, self._sess, checkpoint_directory = 'train')

    
    if self._model.single_pass:
      # Make a descriptive decode directory name
      #ckpt_name = "ckpt-" + ckpt_path.split('-')[-1] # this is something of the form "ckpt-123456"
      #self._decode_dir = os.path.join(self.model.log_folder, get_decode_dir_name(ckpt_name))
      self._decode_dir = self._model.decode_folder 
      if os.path.exists(self._decode_dir):
        print("single_pass decode directory should not already exist")
        # raise Exception("single_pass decode directory %s should not already exist" % self._decode_dir)

    else: # Generic decode dir name
      self._decode_dir = self._model.decode_folder

    # Make the decode dir if necessary
    if not os.path.exists(self._decode_dir): os.mkdir(self._decode_dir)

    if self._model.single_pass:
      # Make the dirs to contain output written in the correct format for pyrouge
      self._rouge_ref_dir = os.path.join(self._decode_dir, "reference")
      if not os.path.exists(self._rouge_ref_dir): os.mkdir(self._rouge_ref_dir)
      self._rouge_dec_dir = os.path.join(self._decode_dir, "decoded")
      if not os.path.exists(self._rouge_dec_dir): os.mkdir(self._rouge_dec_dir)
Example #29
def main():
    print("==> loading model")
    num_classes = args.num_classes
    model = create_model(num_classes, args).to(device)
    model = load_checkpoint(model, checkpoint).to(device)

    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    input_data = args.input
    if os.path.isdir(input_data):
        input_folder_lst = [
            os.path.join(input_data, i) for i in os.listdir(input_data)
        ]
        for input_folder in input_folder_lst:
            print(os.path.basename(input_folder))
            input_data = os.path.join(input_folder, 'patch.json')
            filter_patches(model, input_data, transform)
    else:
        filter_patches(model, input_data, transform)
Example #30
def main(args):
    if torch.cuda.is_available():
        device = torch.device("cuda")
        util.logging.info("Using CUDA")
    else:
        device = torch.device("cpu")
        util.logging.info("Using CPU")

    if args.checkpoint_path is None:
        checkpoint_paths = list(util.get_checkpoint_paths())
    else:
        checkpoint_paths = [args.checkpoint_path]

    for checkpoint_path in checkpoint_paths:
        try:
            model, optimizer, stats, run_args = util.load_checkpoint(
                checkpoint_path, device=device)
        except FileNotFoundError as e:
            # skip runs whose checkpoint is missing instead of crashing below
            print(e)
            continue

        plot_losses(f"{util.get_save_dir(run_args)}/losses.pdf", stats.losses)