def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_list', required=True)
    parser.add_argument('--config', required=True)
    parser.add_argument('--dump_prefix', required=True)
    parser.add_argument('--output_dir', required=True)
    args = parser.parse_args()

    conf_mod = imp.load_source('config', args.config)
    config = conf_mod.get()

    model = config['model']
    utils.load_model(model, args.dump_prefix)

    X, _ = conf_mod.get_data(args.image_list)

    utils.mkdir_p(args.output_dir)
    image_list = utils.read_image_list(args.image_list)

    logger.info('compiling model ...')
    model.compile(loss='mean_squared_error', optimizer=Adam())

    for x, (input_path, _) in ProgressBar()(zip(X, image_list)):
        y = model.predict(np.array([x], dtype='float32'),
                          batch_size=1, verbose=False)
        img = np.round(y.reshape(y.shape[2:]) * 255.0).astype('uint8')

        # FIXME: we assume that basenames of images are distinct
        fname = os.path.basename(input_path)
        output_path = os.path.join(args.output_dir, fname)
        cv2.imwrite(output_path, img)
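# Note: utils.load_model(model, args.dump_prefix) above restores weights into an
# already-built Keras model before compiling. The utils module itself is not
# shown; a minimal sketch of what such helpers might look like (the filename
# convention is an assumption):
import errno
import os


def mkdir_p(path):
    """Hypothetical helper: mkdir -p, ignoring 'already exists' errors."""
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


def load_model(model, dump_prefix):
    """Hypothetical helper: load Keras weights saved under dump_prefix."""
    model.load_weights(dump_prefix + '.h5')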
Example #2
def main():
    parser = argparse.ArgumentParser(description='selfplaying script')
    parser.add_argument('--alice_model_file', type=str,
        help='Alice model file')
    parser.add_argument('--bob_model_file', type=str,
        help='Bob model file')
    parser.add_argument('--context_file', type=str,
        help='context file')
    parser.add_argument('--temperature', type=float, default=1.0,
        help='temperature')
    parser.add_argument('--verbose', action='store_true', default=False,
        help='print out conversations')
    parser.add_argument('--seed', type=int, default=1,
        help='random seed')
    parser.add_argument('--score_threshold', type=int, default=6,
        help='successful dialog should have more than score_threshold in score')
    parser.add_argument('--max_turns', type=int, default=20,
        help='maximum number of turns in a dialog')
    parser.add_argument('--log_file', type=str, default='',
        help='log successful dialogs to file for training')
    parser.add_argument('--smart_alice', action='store_true', default=False,
        help='make Alice smart again')
    parser.add_argument('--fast_rollout', action='store_true', default=False,
        help='to use faster rollouts')
    parser.add_argument('--rollout_bsz', type=int, default=100,
        help='rollout batch size')
    parser.add_argument('--rollout_count_threshold', type=int, default=3,
        help='rollout count threshold')
    parser.add_argument('--smart_bob', action='store_true', default=False,
        help='make Bob smart again')
    parser.add_argument('--ref_text', type=str,
        help='file with the reference text')
    parser.add_argument('--domain', type=str, default='object_division',
        help='domain for the dialogue')
    args = parser.parse_args()

    utils.set_seed(args.seed)

    alice_model = utils.load_model(args.alice_model_file)
    alice_ty = get_agent_type(alice_model, args.smart_alice, args.fast_rollout)
    alice = alice_ty(alice_model, args, name='Alice')

    bob_model = utils.load_model(args.bob_model_file)
    bob_ty = get_agent_type(bob_model, args.smart_bob, args.fast_rollout)
    bob = bob_ty(bob_model, args, name='Bob')

    dialog = Dialog([alice, bob], args)
    logger = DialogLogger(verbose=args.verbose, log_file=args.log_file)
    ctx_gen = ContextGenerator(args.context_file)

    selfplay = SelfPlay(dialog, ctx_gen, args, logger)
    selfplay.run()
Example #3
File: main.py Project: lsp140510/Mobike
def test(**kwargs):

    # ---------------------- update config ----------------------
    opt = DefaultConfig()
    opt.update(**kwargs)
    opt.printf()

    # ---------------------- data processing ----------------------

    # load data
    train, test = get_test_data(opt)
    gc.collect()
    # build samples
    # test_sample = get_sample(train, test, load=True)
    # gc.collect()
    # build features
    # test_feat = get_feat(train, test_sample)
    # gc.collect()

    # save features to file
    # test_feat.to_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    test_feat = pd.read_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}.hdf'.format(test.shape[0]))
    test_feat = get_feat(train, test_feat)
    gc.collect()
    test_feat.to_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}_filter.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)

    # ---------------------- load model ----------------------

    # opt['model_name'] = 'lgb_1_90_all.pkl'
    # gbm0, use_feat0 = load_model(opt)
    opt['model_name'] = 'lgb_2017-09-23#20:14:52_0.58893.pkl'
    gbm1, use_feat1 = load_model(opt)
    # opt['model_name'] = 'lgb_2_300_top15.pkl'
    # gbm2, use_feat2 = load_model(opt)
    # opt['model_name'] = 'lgb_3_300_top10.pkl'
    # gbm3, use_feat3 = load_model(opt)
    # opt['model_name'] = 'lgb_4_300_top5.pkl'
    # gbm4, use_feat4 = load_model(opt)

    # ---------------------- save predictions -------------------

    # test_feat.loc[:, 'pred'] = gbm0.predict(test_feat[use_feat0])
    # gc.collect()
    # res = test_feat[['orderid', 'geohashed_end_loc', 'pred']].sort_values(by=['orderid', 'pred'], ascending=False).groupby('orderid').head(25)
    # res[['orderid', 'geohashed_end_loc']].to_hdf('/home/xuwenchao/dyj-storage/sample_25_{}_filter_leak_sample.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    # gc.collect()

    # test_feat.loc[:, 'pred'] = gbm1.predict(test_feat[use_feat1])
    # test_feat[['orderid', 'geohashed_end_loc', 'pred']].to_hdf('/home/xuwenchao/dyj-storage/pred/pred_{}_0.58820.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)

    res = predict(test_feat, use_feat1, gbm1)
    test_feat[['orderid', 'geohashed_end_loc', 'pred']].to_hdf('/home/xuwenchao/dyj-storage/pred/pred_{}_0.58893.hdf'.format(test.shape[0]), 'w', complib='blosc', complevel=5)
    gc.collect()
    cur_time = datetime.datetime.now().strftime('%Y-%m-%d#%H:%M:%S')
    res_path = '{}/day{}_{}_wc_sample_0.58893.csv'.format(opt['result_dir'], opt['test_startday'], cur_time)
    res.to_csv(res_path, index=False)
    print('saved test results to:', res_path)
Example #4
def main():
    model = utils.load_model()
    
    valid_df = utils.get_valid_df()
    
    predictions = model.predict(valid_df)
    predictions = predictions.reshape(len(predictions), 1)
    
    utils.write_submission(predictions)
Example #5
File: main.py Project: lsp140510/Mobike
def val(**kwargs):

    # ---------------------- update config ----------------------
    opt = DefaultConfig()
    opt.update(**kwargs)
    opt.printf()

    # ---------------------- data processing ----------------------

    # load data
    # train1, train2, train_test = get_train_data(opt)
    # build samples
    # train_sample = get_sample(train1, train2, load=True)
    # build features
    # train_feat = get_feat(train_test, train_sample)
    # gc.collect()

    # train_feat.to_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_{}.hdf'.format(opt['startday']), 'w', complib='blosc', complevel=5)
    train_feat = pd.read_hdf('/home/xuwenchao/dyj-storage/all-feat/feat_23_24_label.hdf')

    # ---------------------- load model ----------------------

    # opt['model_name'] = 'lgb_1_90_all.pkl'
    # gbm0, use_feat0 = load_model(opt)
    opt['model_name'] = 'lgb_1_2017-09-15#19:50:48_0.58820.pkl'
    gbm, use_feat = load_model(opt)
    opt['model_name'] = 'lgb_2017-09-23#20:14:52_0.58893.pkl'
    gbm1, use_feat1 = load_model(opt)
    # gbm2, use_feat2 = load_model(opt)
    # opt['model_name'] = 'lgb_2017-09-03#23:24:26_0.57836.pkl'
    # gbm3, use_feat3 = load_model(opt)
    # opt['model_name'] = ''
    # gbm4, use_feat4 = load_model(opt)

    # ---------------------- evaluation -------------------------

    train_feat.loc[:, 'pred'] = gbm.predict(train_feat[use_feat])
    gc.collect()
    train_feat[['orderid', 'geohashed_end_loc', 'pred']].to_csv('/home/xuwenchao/dyj-storage/pred/pred_23_24_0.58820.csv', index=None)
    train_feat.loc[:, 'pred'] = gbm1.predict(train_feat[use_feat1])
    gc.collect()
    train_feat[['orderid', 'geohashed_end_loc', 'pred']].to_csv('/home/xuwenchao/dyj-storage/pred/pred_23_24_0.58893.csv', index=None)
    def for_model(cls, network_build_fn, params_path=None, *args, **kwargs):
        """
        Construct a classifier, given a network building function
        and an optional path from which to load parameters.
        :param network_build_fn: network builder function of the form `fn(input_var, **kwargs) -> lasagne_layer`
        that constructs a network in the form of a Lasagne layer, given an input variable (a Theano variable)
        :param params_path: [optional] path from which to load network parameters
        :return: a classifier instance
        """
        # Prepare Theano variables for inputs and targets
        input_var = T.tensor4('inputs')
        target_var = T.ivector('targets')

        # Build the network
        print("Building model and compiling functions...")
        network = network_build_fn(input_var=input_var, **kwargs)
        # If a parameters path is provided, load them
        if params_path is not None:
            utils.load_model(params_path, network)
        return cls(input_var, target_var, network, *args, **kwargs)
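# Note: a hedged example of a network_build_fn matching the documented
# fn(input_var, **kwargs) -> lasagne_layer signature; the layer choices and
# shapes below are illustrative, not from the original project.
import lasagne


def build_cnn(input_var=None, **kwargs):
    net = lasagne.layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    net = lasagne.layers.Conv2DLayer(net, num_filters=32, filter_size=(3, 3),
                                     nonlinearity=lasagne.nonlinearities.rectify)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    net = lasagne.layers.DenseLayer(net, num_units=10,
                                    nonlinearity=lasagne.nonlinearities.softmax)
    return net

# clf = Classifier.for_model(build_cnn, params_path='model.npz')  # hypothetical usage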
Example #7
def main():
    parser = argparse.ArgumentParser(description='Negotiator')
    parser.add_argument('--dataset', type=str, default='./data/negotiate/val.txt',
        help='location of the dataset')
    parser.add_argument('--model_file', type=str,
        help='model file')
    parser.add_argument('--smart_ai', action='store_true', default=False,
        help='to use rollouts')
    parser.add_argument('--seed', type=int, default=1,
        help='random seed')
    parser.add_argument('--temperature', type=float, default=1.0,
        help='temperature')
    parser.add_argument('--domain', type=str, default='object_division',
        help='domain for the dialogue')
    parser.add_argument('--log_file', type=str, default='',
        help='log file')
    args = parser.parse_args()

    utils.set_seed(args.seed)

    model = utils.load_model(args.model_file)
    ai = LstmAgent(model, args)
    logger = DialogLogger(verbose=True, log_file=args.log_file)
    domain = get_domain(args.domain)

    score_func = rollout if args.smart_ai else likelihood

    dataset, sents = read_dataset(args.dataset)
    ranks, n, k = 0, 0, 0
    for ctx, dialog in dataset:
        start_time = time.time()
        # start new conversation
        ai.feed_context(ctx)
        for sent, you in dialog:
            if you:
                # if it is your turn to speak, take the target sentence and compute its rank
                rank = compute_rank(sent, sents, ai, domain, args.temperature, score_func)
                # compute lang_h for the ground-truth sentence
                enc = ai._encode(sent, ai.model.word_dict)
                _, ai.lang_h, lang_hs = ai.model.score_sent(enc, ai.lang_h, ai.ctx_h, args.temperature)
                # save hidden states and the utterance
                ai.lang_hs.append(lang_hs)
                ai.words.append(ai.model.word2var('YOU:'))
                ai.words.append(Variable(enc))
                ranks += rank
                n += 1
            else:
                ai.read(sent)
        k += 1
        time_elapsed = time.time() - start_time
        logger.dump('dialogue %d | avg rank %.3f | raw %d/%d | time %.3f' % (k, 1. * ranks / n, ranks, n, time_elapsed))

    logger.dump('final avg rank %.3f' % (1. * ranks / n))
def get_vocab(model):
    try:
        base_dir = model.other_params['base_dir']
        vocabulary_path = join(base_dir, 'vocabulary.pkl.gz')
        return load_model(vocabulary_path)
    except Exception:
        try:
            ngram_filename = model.other_params['ngram_filename']
        except KeyError:
            ngram_filename = DEFAULT_NGRAM_FILE
        try:
            vocab_size = model.other_params['vocab_size']
        except KeyError:
            vocab_size = 50000
        return ngrams.NgramReader(ngram_filename, vocab_size=vocab_size).word_array
Example #9
def main():
    parser = argparse.ArgumentParser(description='chat utility')
    parser.add_argument('--model_file', type=str,
        help='model file')
    parser.add_argument('--domain', type=str, default='object_division',
        help='domain for the dialogue')
    parser.add_argument('--context_file', type=str, default='',
        help='context file')
    parser.add_argument('--temperature', type=float, default=1.0,
        help='temperature')
    parser.add_argument('--num_types', type=int, default=3,
        help='number of object types')
    parser.add_argument('--num_objects', type=int, default=6,
        help='total number of objects')
    parser.add_argument('--max_score', type=int, default=10,
        help='max score per object')
    parser.add_argument('--score_threshold', type=int, default=6,
        help='successful dialog should have more than score_threshold in score')
    parser.add_argument('--seed', type=int, default=1,
        help='random seed')
    parser.add_argument('--smart_ai', action='store_true', default=False,
        help='make AI smart again')
    parser.add_argument('--ai_starts', action='store_true', default=False,
        help='allow AI to start the dialog')
    parser.add_argument('--ref_text', type=str,
        help='file with the reference text')
    args = parser.parse_args()

    utils.set_seed(args.seed)

    human = HumanAgent(domain.get_domain(args.domain))

    alice_ty = LstmRolloutAgent if args.smart_ai else LstmAgent
    ai = alice_ty(utils.load_model(args.model_file), args)


    agents = [ai, human] if args.ai_starts else [human, ai]

    dialog = Dialog(agents, args)
    logger = DialogLogger(verbose=True)
    # either take manually produced contexts, or rely on the ones from the dataset
    if args.context_file == '':
        ctx_gen = ManualContextGenerator(args.num_types, args.num_objects, args.max_score)
    else:
        ctx_gen = ContextGenerator(args.context_file)

    chat = Chat(dialog, ctx_gen, logger)
    chat.run()
Example #10
def _train(net, training_data, validation_data, model_name, learning_rate, max_epochs, min_improvement):
    min_learning_rate = 1e-6
    best_validation_ppl = np.inf
    divide = False

    for epoch in range(1, max_epochs + 1):

        epoch_start = time()

        print("\n======= EPOCH %s =======" % epoch)
        print("\tLearning rate is %s" % learning_rate)

        train_ppl = _process_corpus(net, training_data, mode='train', learning_rate=learning_rate)
        print("\tTrain PPL is %.3f" % train_ppl)

        validation_ppl = _process_corpus(net, validation_data, mode='test')
        print("\tValidation PPL is %.3f" % validation_ppl)

        print("\tTime taken: %ds" % (time() - epoch_start))

        if np.log(validation_ppl) * min_improvement > np.log(best_validation_ppl):  # Mikolov's recipe
            if not divide:
                divide = True
                print("\tStarting to reduce the learning rate...")
                if validation_ppl > best_validation_ppl:
                    print("\tLoading best model.")
                    net = utils.load_model("../out/" + model_name)
            else:
                if validation_ppl < best_validation_ppl:
                    print("\tSaving model.")
                    net.save("../out/" + model_name, final=True)
                break
        else:
            print("\tNew best model! Saving...")
            best_validation_ppl = validation_ppl
            final = learning_rate / 2. < min_learning_rate or epoch == max_epochs
            net.save("../out/" + model_name, final)

        if divide:
            learning_rate /= 2.

        if learning_rate < min_learning_rate:
            break

    print("-" * 30)
    print("Finished training.")
    print("Best validation PPL is %.3f\n\n" % best_validation_ppl)
Example #11
def main():
    parser = argparse.ArgumentParser(description='testing script')
    parser.add_argument('--data', type=str, default='data/negotiate',
        help='location of the data corpus')
    parser.add_argument('--unk_threshold', type=int, default=20,
        help='minimum word frequency to be in dictionary')
    parser.add_argument('--model_file', type=str,
        help='pretrained model file')
    parser.add_argument('--seed', type=int, default=1,
        help='random seed')
    parser.add_argument('--hierarchical', action='store_true', default=False,
        help='use hierarchical model')
    parser.add_argument('--bsz', type=int, default=16,
        help='batch size')
    parser.add_argument('--cuda', action='store_true', default=False,
        help='use CUDA')
    args = parser.parse_args()

    device_id = utils.use_cuda(args.cuda)
    utils.set_seed(args.seed)

    corpus = data.WordCorpus(args.data, freq_cutoff=args.unk_threshold, verbose=True)
    model = utils.load_model(args.model_file)

    crit = Criterion(model.word_dict, device_id=device_id)
    sel_crit = Criterion(model.item_dict, device_id=device_id,
        bad_toks=['<disconnect>', '<disagree>'])


    testset, testset_stats = corpus.test_dataset(args.bsz, device_id=device_id)
    test_loss, test_select_loss = 0, 0

    N = len(corpus.word_dict)
    for batch in testset:
        # run forward on the batch, produces output, hidden, target,
        # selection output and selection target
        out, hid, tgt, sel_out, sel_tgt = Engine.forward(model, batch, volatile=False)

        # compute LM and selection losses
        test_loss += tgt.size(0) * crit(out.view(-1, N), tgt).data[0]
        test_select_loss += sel_crit(sel_out, sel_tgt).data[0]

    test_loss /= testset_stats['nonpadn']
    test_select_loss /= len(testset)
    print('testloss %.3f | testppl %.3f' % (test_loss, np.exp(test_loss)))
    print('testselectloss %.3f | testselectppl %.3f' % (test_select_loss, np.exp(test_select_loss)))
def run(cfg_name, load=False, max_tweets=DEFAULT_MAX_TWEETS):
    """ Run program """
    api = initialise_tweepy(cfg_name)
    m = model.Model(api=api, max_tweets=max_tweets)
    if load:
        print("Loading model...")
        cl = utils.load_model()
    else:
        print("Training fresh model...")
        cl = m.generate_model()

    while True:
        # Get users tweets and classify
        handle = get_user_input()
        if handle:
            tweet_list = utils.get_tweet_list(api, handle, max_tweets)
            p = cl.classify(tweet_list)
            print("Party: {0}".format(p))
        else:
            exit("Bye!")
                if punctuation == " ":
                    output_file.write("%s%s" % (punctuation, word))
                else:
                    if write_readable_text:
                        output_file.write("%s %s" % (punctuation[:1], word))
                    else:
                        output_file.write(" %s %s" % (punctuation, word))

            else:
                word = token
    
if __name__ == "__main__":
    
    if len(sys.argv) > 3:
        model_name = sys.argv[1]    
        net = utils.load_model(model_name)
        net.batch_size = 1
        net.reset_state()
        punctuation_reverse_map = utils.get_reverse_map(net.out_vocabulary)
        
        write_readable_text = bool(int(sys.argv[2]))
         
        output_file_path = sys.argv[3]
        if output_file_path == "-":
            output_file_path = sys.stdout

        if len(sys.argv) > 4:
            with open(sys.argv[4], 'r') as unpunctuated_file:
                unpunctuated_text = " ".join(unpunctuated_file.readlines())
        else:
            unpunctuated_text = " ".join(sys.stdin.readlines())
Example #14
File: main.py Project: webzerg/TCAN
def train(args):
    logging.info("start load parameters.")
    torch.manual_seed(args.seed)
    if args.dataset_name != 'mnist':
        num_chans = [args.nhid] * (args.levels - 1) + [args.emsize]
    else:
        num_chans = [args.nhid] * args.levels
    logger = SummaryWriter(args.dir_log)

    # load data
    logging.info("start load {} dataset.".format(args.dataset_name))
    train_dataset = RawDataset(args.dir_data_root, args.dataset_name, 'train',
                               args.seq_len, args.valid_len, args.is_corpus,
                               args.permute)
    valid_dataset = RawDataset(args.dir_data_root, args.dataset_name, 'valid',
                               args.seq_len, args.valid_len, args.is_corpus,
                               args.permute)
    test_dataset = RawDataset(args.dir_data_root, args.dataset_name, 'test',
                              args.seq_len, args.valid_len, args.is_corpus,
                              args.permute)
    train_dataloader = load_dataloader(train_dataset,
                                       args.batch_size,
                                       num_workers=args.num_workers)
    valid_dataloader = load_dataloader(valid_dataset,
                                       args.batch_size,
                                       num_workers=args.num_workers)
    test_dataloader = load_dataloader(test_dataset,
                                      args.batch_size,
                                      num_workers=args.num_workers)
    n_dict = train_dataset.n_dict
    logging.info("end -------------")

    # define model
    logging.info("start load model.")
    model = TCANet(args.emsize,
                   n_dict,
                   num_chans,
                   args.valid_len,
                   args.num_subblocks,
                   temp_attn=args.temp_attn,
                   nheads=args.nheads,
                   en_res=args.en_res,
                   conv=args.conv,
                   dropout=args.dropout,
                   emb_dropout=args.emb_dropout,
                   key_size=args.key_size,
                   kernel_size=args.ksize,
                   tied_weights=args.tied,
                   dataset_name=args.dataset_name,
                   visual=args.visual)

    num_parameters_train = sum(p.numel() for p in model.parameters()
                               if p.requires_grad)
    logging.info("Number of parameters = {}".format(num_parameters_train))

    if args.cuda:
        model.cuda(args.gpu_id)
    if args.is_parallel:
        model = nn.DataParallel(model)
        logging.info("The model is training with nn.DataParallel.")
    if args.continue_train:
        model = load_model(model, args)
        logging.info("Continue training, load saved model.")

    criterion = nn.CrossEntropyLoss()
    lr = args.lr
    optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)

    visual_info_all = []
    best_vloss = 1e8

    # start training
    logging.info("start training.")
    try:
        all_vloss = []
        for epoch in range(args.epochs):
            epoch_start_time = time.time()
            model.train()
            loss_sum = 0
            processed_data_size = 0
            correct_total = 0
            for i, (train_batch,
                    label_batch) in enumerate(tqdm(train_dataloader,
                                                   ncols=80)):
                optimizer.zero_grad()
                train_batch = train_batch.cuda(args.gpu_id)
                label_batch = label_batch.cuda(args.gpu_id)
                if args.temp_attn:
                    output_batch, attn_weight_list = model(train_batch)
                    if i == 1:
                        visual_info = [
                            train_batch, label_batch, attn_weight_list
                        ]

                else:
                    output_batch = model(train_batch)

                # Discard the effective history part
                eff_history = args.seq_len - args.valid_len
                if eff_history < 0:
                    raise ValueError(
                        "Valid sequence length must be smaller than sequence length!"
                    )

                if args.dataset_name != 'mnist':
                    label_batch = label_batch[:,
                                              eff_history:].contiguous().view(
                                                  -1)
                    output_batch = output_batch[:, eff_history:].contiguous(
                    ).view(-1, n_dict)
                else:
                    pred = output_batch.data.max(1, keepdim=True)[1]
                    correct_total += pred.eq(
                        label_batch.data.view_as(pred)).cpu().sum()
                loss_i = criterion(output_batch, label_batch)

                loss_i.backward()
                if args.clip > 0:
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.clip)
                optimizer.step()
                if args.dataset_name != 'mnist':
                    loss_sum += (train_batch.size(1) -
                                 eff_history) * loss_i.item()
                    processed_data_size += train_batch.size(1) - eff_history
                else:
                    loss_sum += loss_i.item()
                    processed_data_size += 1

            if args.dataset_name == 'mnist':
                acc_train = 100 * float(correct_total) / len(train_dataset)
            loss_train = round(loss_sum / processed_data_size, 6)
            ppl_train = round(np.exp(loss_train), 4)
            epoch_end_time = time.time()

            # evaluate
            loss_val, ppl_val = evaluate(model, valid_dataloader, criterion,
                                         n_dict, args)
            loss_test, ppl_test = evaluate(model, test_dataloader, criterion,
                                           n_dict, args)

            # draw sequence correlation map
            if args.temp_attn and args.visual:
                visual_info_all.append(visual_info)
                if epoch == 0:
                    draw_attn(visual_info, epoch, args,
                              train_dataset.dictionary)
                else:
                    draw_attn(visual_info, epoch, args)

            # tensorboard
            if args.dataset_name == 'mnist':
                logging.info('| Epoch {}/{} | Time: {:.2f}s | train loss {:.2f} | train acc {:.2f} | test loss {:.2f}  | test acc {:.2f} |'\
                    .format(epoch+1, args.epochs, epoch_end_time-epoch_start_time, loss_train, acc_train, loss_test, ppl_test))
                logger_note = args.log
                logger.add_scalars('{}/train_loss'.format(logger_note),
                                   {'loss_train': loss_train}, epoch)
                logger.add_scalars('{}/train_acc'.format(logger_note),
                                   {'acc_train': acc_train}, epoch)
                logger.add_scalars('{}/test_loss'.format(logger_note),
                                   {'loss_test': loss_test}, epoch)
                logger.add_scalars('{}/test_acc'.format(logger_note),
                                   {'acc_test': ppl_test}, epoch)
            else:
                logging.info('| Epoch {}/{} | Time: {:.2f}s | train loss {:.2f} | train ppl {:.2f} | test loss {:.2f}  | test ppl {:.2f} |'\
                    .format(epoch+1, args.epochs, epoch_end_time-epoch_start_time, loss_train, ppl_train, loss_test, ppl_test))
                logger_note = args.log
                logger.add_scalars('{}/train_loss'.format(logger_note),
                                   {'loss_train': loss_train}, epoch)
                logger.add_scalars('{}/train_ppl'.format(logger_note),
                                   {'ppl_train': ppl_train}, epoch)
                logger.add_scalars('{}/test_loss'.format(logger_note),
                                   {'loss_test': loss_test}, epoch)
                logger.add_scalars('{}/test_ppl'.format(logger_note),
                                   {'ppl_test': ppl_test}, epoch)

            # Save the model if the validation loss is the best we've seen so far.
            if loss_val < best_vloss:
                save_model(model, args)
                best_vloss = loss_val

            # Anneal the learning rate if the validation loss plateaus
            if epoch > 5 and loss_val >= max(all_vloss[-5:]):
                lr = lr / 2.
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
            all_vloss.append(loss_val)

    except KeyboardInterrupt:
        # after Ctrl + C, print final result
        logging.info('-' * 40)
        logging.info('Exiting from training early')
        model = load_model(model, args)
        loss_test, ppl_test = evaluate(model, test_dataloader, criterion,
                                       n_dict, args)
        logging.info('-' * 40)
        logging.info("log = {}".format(args.log))
        logging.info("Number of parameters = {}".format(num_parameters_train))
        if args.dataset_name == 'mnist':
            logging.info('| test loss {:.2f}  | test acc {:.2f}'.format(
                loss_test, ppl_test))
        else:
            logging.info('| test loss {:.2f}  | test ppl {:.2f}'.format(
                loss_test, ppl_test))
        logging.info('-' * 40)
        logger.close()
        end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # store result
        record(end_time, args.path_results, args.dataset_name, args.optim,
               args.key_size, args.vhdropout, args.levels, args.batch_size,
               args.epochs, args.lr, args.num_subblocks, args.en_res,
               args.temp_attn, loss_test, ppl_test, num_parameters_train,
               args.log)

    # print final result
    logger.close()
    model = load_model(model, args)
    loss_test, ppl_test = evaluate(model, test_dataloader, criterion, n_dict,
                                   args)
    logging.info('-' * 40)
    logging.info("log = {}".format(args.log))
    logging.info("Number of parameters = {}".format(num_parameters_train))
    if args.dataset_name == 'mnist':
        logging.info('| test loss {:.2f}  | test acc {:.2f}'.format(
            loss_test, ppl_test))
    else:
        logging.info('| test loss {:.2f}  | test ppl {:.2f}'.format(
            loss_test, ppl_test))
    logging.info('-' * 40)

    # store attention weights
    if args.temp_attn:
        save_visual_info(visual_info_all, args)
    end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # store result
    record(end_time, args.path_results, args.dataset_name, args.optim, args.key_size, args.vhdropout, args.levels, args.batch_size, \
        args.epochs, args.lr, args.num_subblocks, args.en_res, args.temp_attn, loss_test, ppl_test, num_parameters_train, args.log)
Example #15
            pred_class = pred.argmax(1)[0]
            pred_score = pred[0][pred_class]
            data['event_info'][sid]['predicted_class'] = {
                'class': pred_class,
                'score': str(pred_score),
            }
        except:
            logger.exception('Error during quad processing of {}'.format(key))
    return data


def publish(data):
    client = utils.RabbitClient(queue=PUBLISH, host='rabbitmq')
    client.send(data, PUBLISH)


def main():
    rabbit_consume = utils.RabbitClient(queue=CONSUME, host='rabbitmq')
    rabbit_consume.receive(callback)


if __name__ == '__main__':
    args = utils.parse_arguments()

    logger.info('Loading model...')
    MODEL, VOCAB = utils.load_model(args)
    VOCAB_SIZE = len(VOCAB.keys())
    CHECK = set(VOCAB.keys())

    main()
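# Note: utils.RabbitClient is this project's own wrapper and is not shown; a
# minimal sketch of what it might look like on top of pika (the queue handling
# and JSON encoding are assumptions):
import json

import pika


class RabbitClient:
    def __init__(self, queue, host):
        self.queue = queue
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=host))
        self.channel = self.connection.channel()
        self.channel.queue_declare(queue=queue)

    def send(self, data, routing_key):
        # assumes data is a JSON-serializable dict
        self.channel.basic_publish(exchange='', routing_key=routing_key,
                                   body=json.dumps(data))

    def receive(self, callback):
        # blocks forever, handing each message body to callback
        self.channel.basic_consume(
            queue=self.queue,
            on_message_callback=lambda ch, method, props, body: callback(body),
            auto_ack=True)
        self.channel.start_consuming()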
Example #16
def analyze(model,
            saver,
            sess,
            exp_string,
            data_generator,
            test_num_updates=None,
            NUM_ANALYSIS_POINTS=1,
            base_analysis=False,
            steps=[-1]):

    ### computing activations

    num_classes = data_generator.num_classes  # for classification, 1 otherwise
    np.random.seed(1)
    random.seed(1)

    print(exp_string)
    hid1, hid2, hid3, hid4, out, acc = [], [], [], [], [], []

    for step in steps:
        meta_hidden1s = []
        meta_hidden2s = []
        meta_hidden3s = []
        meta_hidden4s = []
        meta_outputs = []
        metaval_accuracies = []
        print(f"Load model {step}")
        load_model(FLAGS.logdir, exp_string, saver, sess, step)
        print(f"Load model {step} done!")
        for i in range(NUM_ANALYSIS_POINTS + 1):

            if i == 0:  # The first sample is the evaluation sample
                continue

            if 'generate' not in dir(data_generator):
                feed_dict = {}
                feed_dict = {model.meta_lr: 0.00}
            else:
                batch_x, batch_y, amp, phase = data_generator.generate(
                    train=False)

                if FLAGS.baseline == 'oracle':  # NOTE - this flag is specific to sinusoid
                    batch_x = np.concatenate([
                        batch_x,
                        np.zeros([batch_x.shape[0], batch_x.shape[1], 2])
                    ], 2)
                    batch_x[0, :, 1] = amp[0]
                    batch_x[0, :, 2] = phase[0]

                inputa = batch_x[:, :num_classes * FLAGS.update_batch_size, :]
                inputb = batch_x[:, num_classes * FLAGS.update_batch_size:, :]
                labela = batch_y[:, :num_classes * FLAGS.update_batch_size, :]
                labelb = batch_y[:, num_classes * FLAGS.update_batch_size:, :]

                feed_dict = {
                    model.inputa: inputa,
                    model.inputb: inputb,
                    model.labela: labela,
                    model.labelb: labelb,
                    model.meta_lr: 0.0
                }

            targets = [
                model.hiddens1, model.hiddens2, model.hiddens3, model.hiddens4,
                model.outputs,
                model.metaval_total_accuracy1 + model.metaval_total_accuracies2
            ]

            def reshape_elems_of_list(layers, shape=(model.dim_output, -1)):
                reshaped_layers = []
                for layer in layers:
                    layer = np.reshape(layer, shape)
                    reshaped_layers.append(layer)
                return reshaped_layers

            hidden1s, hidden2s, hidden3s, hidden4s, outputs, a = sess.run(
                targets, feed_dict)

            meta_hidden1s.append(reshape_elems_of_list(hidden1s))
            meta_hidden2s.append(reshape_elems_of_list(hidden2s))
            meta_hidden3s.append(reshape_elems_of_list(hidden3s))
            meta_hidden4s.append(reshape_elems_of_list(hidden4s))
            meta_outputs.append(reshape_elems_of_list(outputs))
            metaval_accuracies.append(a)

        hid1.append(meta_hidden1s)
        hid2.append(meta_hidden2s)
        hid3.append(meta_hidden3s)
        hid4.append(meta_hidden4s)
        out.append(meta_outputs)
        acc.append(metaval_accuracies)

    ### prepare for visualizing
    from rsa import plot_rsa_fancy, rsa
    layers = [hid1, hid2, hid3, hid4, out]
    if FLAGS.datasource == 'miniimagenet':
        layer_names = [
            "Pooling layer 1", "Pooling layer 2", "Pooling layer 3",
            "Pooling layer 4", "Logits/Head"
        ]
    else:
        layer_names = [
            "Convolution layer 1", "Convolution layer 2",
            "Convolution layer 3", "Convolution layer 4", "Logits/Head"
        ]

    final_base_representation = []
    final_mean_diff_to_base = []
    final_std_diff_to_base = []
    for i, (layer_name) in enumerate(layer_names):

        representations = []
        base_representations = []
        mean_diff_to_base = []
        std_diff_to_base = []
        labels = []
        colors = []
        inner_steps = [0, 1, 5, 10]

        for j, step in enumerate(steps):
            base_representations.append(layers[i][j][0][0])
            diff_to_base = []
            for k in range(NUM_ANALYSIS_POINTS):
                representations = representations + list(
                    map(layers[i][j][k].__getitem__, inner_steps))
                diff_to_base.append(
                    rsa(
                        np.array(
                            [base_representations[-1], representations[-1]])))
                colors = colors + [k + j * NUM_ANALYSIS_POINTS
                                   ] * len(inner_steps)
            mean_diff_to_base.append(np.mean(diff_to_base))
            std_diff_to_base.append(np.std(diff_to_base))
            labels = colors

        final = np.array(representations)
        final_base_representation.append(np.array(base_representations))
        final_mean_diff_to_base.append(np.array(mean_diff_to_base))
        final_std_diff_to_base.append(np.array(std_diff_to_base))

        if not base_analysis:
            plot_rsa_fancy(final,
                           labels,
                           colors,
                           method="correlation",
                           title=layer_name,
                           n_tasks=NUM_ANALYSIS_POINTS,
                           steps=steps)

    if base_analysis:
        plot_neighbour_analysis(steps, final_base_representation, layer_names)
        plot_base_analysis(steps, final_mean_diff_to_base,
                           final_std_diff_to_base, layer_names)
Example #17
def main():
    # parse command line argument and generate config dictionary
    config = parse_args()

    logger.info(json.dumps(config, indent=2))

    run_config = config['run_config']
    optim_config = config['optim_config']
    data_config = config['data_config']
    human_tune = run_config['human_tune']
    print('human tune type: ', type(human_tune), human_tune)
#    if human_tune: human_tune_scores = []

    # set random seed
    seed = run_config['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    if not run_config['no_output']:
        # create output directory
        outdir = run_config['outdir']
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        # save config as json file in output directory
        outpath = os.path.join(outdir, 'config.json')
        with open(outpath, 'w') as fout:
            json.dump(config, fout, indent=2)


    # load model
    logger.info('Loading model...')
    model = load_model(config['model_config'])
    n_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    logger.info('n_params: {}'.format(n_params))


    if run_config['use_gpu']:
        model = nn.DataParallel(model)
        model.cuda()
    logger.info('Done')

    
    test_criterion = CrossEntropyLoss(size_average=True)

    master_scores = [] 
    master_labels = []
    master_outputs = [] 
    master_probs = []
    # load pretrained weights if given

    master_resume = run_config['resume']
    print('master directory is: ', master_resume)
        
    # load data loaders
    print('loading data loaders')
    print('loading human tune test loaders')
    test_loaders = get_loader(config['data_config'])
    

    run_config['resume'] = '{0}/fold_{1}/model_best_state_c10h_val_c10_acc.pth'.format(master_resume, fold)

    if os.path.isfile(run_config['resume']):
        print("=> loading checkpoint '{}'".format(run_config['resume']))
        checkpoint = torch.load(run_config['resume'])
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})"
                      .format(run_config['resume'], checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(run_config['resume']))
    
    # get labels
    scores_test, labels_test, outputs_test, probs_test = test(
        checkpoint['epoch'], model, test_criterion, test_loaders,
        run_config, human_tune)
    master_scores.append(scores_test)
    master_labels.append(labels_test)
    master_outputs.append(outputs_test)
    master_probs.append(probs_test)


    master_labels = np.concatenate(master_labels)
    print('master labels shape: ', master_labels.shape)

    print('master labels argmax[:5]: {0}, \n master argmax labels[-5:]: {1}'.format(np.argmax(master_labels[:5], axis = 1), np.argmax(master_labels[-5:], axis = 1)))
 
    master_outputs = np.vstack(master_outputs)
    print('master outputs shape: ', master_outputs.shape)
    master_probs = np.vstack(master_probs)
    print('master probs shape: ', master_probs.shape)

    c10h_outdir = run_config['c10h_scores_outdir']
    if not os.path.exists(c10h_outdir):
        os.makedirs(c10h_outdir)

    identifier = run_config['resume'].split('/')[-3] + '_' + run_config['resume'].split('/')[-4]
    print('identifier reduction: {0} to {1}'.format(str(run_config['resume']), identifier))
    s_dir = os.path.join(str(c10h_outdir), identifier) 

    # resave (overwrite) scores file with latest entries
    keys = master_scores[0].keys()
    print('keys: ', keys)

    with open(os.path.join(s_dir + '_master_scores.csv'), 'w') as output_file:    # changed from above
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(master_scores)
Example #18
import sys
from collections import Counter

import theano
from numpy.linalg import norm
from keras import backend as K  # K is used below; assuming Keras with the Theano backend

sys.path.append("/home/bookchan/data/job_clf/")

from utils import load_pkl, name2path, dump_pkl
from config.path import *
from utils.loader import Reader
from utils import load_model, load_model_weights
from evaluate import evaluate_result


model_name = "pos_flat_cnn"
model = load_model(model_name)
model.compile(loss="mse",
              optimizer="adam",
              metrics=['accuracy'])
load_model_weights(model, "best")

layers = model.layers

conv1 = layers[3]
cnn_input = conv1.get_input_at(0)
pred = model.get_output_at(0)
target = K.placeholder(ndim=len(model.get_output_shape_at(0)), name="target")
loss = K.mean(model.loss_functions[0](pred, target))
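# Note: the snippet ends here; a plausible continuation (not in the original,
# assuming the Keras backend API used above) differentiates the loss with
# respect to the convolution input and wraps the computation in a callable:
grads = K.gradients(loss, cnn_input)[0]
compute_loss_and_grads = K.function([cnn_input, target], [loss, grads])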
Example #19
if __name__ == '__main__':
    cudnn.benchmark = True

    #load data
    train_data = datasets.Cornell
    train_data_loader = data.DataLoader(dataset=train_data,
                                        batch_size=config.BATCH_SIZE,
                                        shuffle=True,
                                        collate_fn=datasets.collate_fn)

    #load model
    encoder = models.Encoder(train_data.num_word, 512, 2, dropout=0.1)
    decoder = models.Decoder(train_data.num_word, 512, 2, 'dot', dropout=0.1)
    if config.LOAD == True:
        utils.load_model(encoder,
                         os.path.join('./Model', str(config.EPOCH_START)),
                         'encoder.pth')
        utils.load_model(decoder,
                         os.path.join('./Model', str(config.EPOCH_START)),
                         'decoder.pth')

    #set optimizer
    encoder_optim = optim.Adam(encoder.parameters(), lr=config.LR)
    decoder_optim = optim.Adam(decoder.parameters(), lr=config.LR * 5)

    #set loss and meter
    criterion = losses.MaskLoss()
    loss_meter = metrics.LossMeter()

    #train
    encoder.train()
Example #20
    return hidden_out.max(1)


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(
        description='DeepSpeech transcription')
    arg_parser = add_inference_args(arg_parser)
    arg_parser.add_argument('--audio-path',
                            default='audio.wav',
                            help='Audio file to predict on')
    arg_parser.add_argument('--offsets',
                            dest='offsets',
                            action='store_true',
                            help='Returns time offset information')
    arg_parser = add_decoder_args(arg_parser)
    args = arg_parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")
    model = load_model(device, args.model_path, args.half)

    spect_parser = SpectrogramParser(model.audio_conf, normalize=True)

    audio_path = '/home/xiao/code/ai_utils/experiment/child_det/audios/8f4d3b65-927e-4722-a794-d8037d6b561b.wav'
    val, idx = infer(
        # audio_path=args.audio_path,
        audio_path=audio_path,
        spect_parser=spect_parser,
        model=model,
        device=device,
        use_half=args.half)
    print('Infer result: {}'.format(idx.item()))
Example #21
def main():
    parser = argparse.ArgumentParser(description='selfplaying script')
    parser.add_argument('--alice_model_file',
                        type=str,
                        help='Alice model file')
    parser.add_argument('--bob_model_file', type=str, help='Bob model file')
    parser.add_argument('--context_file', type=str, help='context file')
    parser.add_argument('--temperature',
                        type=float,
                        default=1.0,
                        help='temperature')
    parser.add_argument('--verbose',
                        action='store_true',
                        default=False,
                        help='print out conversations')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument(
        '--score_threshold',
        type=int,
        default=6,
        help='successful dialog should have more than score_threshold in score'
    )
    parser.add_argument('--max_turns',
                        type=int,
                        default=20,
                        help='maximum number of turns in a dialog')
    parser.add_argument('--log_file',
                        type=str,
                        default='',
                        help='log successful dialogs to file for training')
    parser.add_argument('--smart_alice',
                        action='store_true',
                        default=False,
                        help='make Alice smart again')
    parser.add_argument('--fast_rollout',
                        action='store_true',
                        default=False,
                        help='to use faster rollouts')
    parser.add_argument('--rollout_bsz',
                        type=int,
                        default=100,
                        help='rollout batch size')
    parser.add_argument('--rollout_count_threshold',
                        type=int,
                        default=3,
                        help='rollout count threshold')
    parser.add_argument('--smart_bob',
                        action='store_true',
                        default=False,
                        help='make Bob smart again')
    parser.add_argument('--ref_text',
                        type=str,
                        help='file with the reference text')
    parser.add_argument('--domain',
                        type=str,
                        default='object_division',
                        help='domain for the dialogue')
    args = parser.parse_args()

    utils.set_seed(args.seed)

    alice_model = utils.load_model(args.alice_model_file)
    alice_ty = get_agent_type(alice_model, args.smart_alice, args.fast_rollout)
    alice = alice_ty(alice_model, args, name='Alice')

    bob_model = utils.load_model(args.bob_model_file)
    bob_ty = get_agent_type(bob_model, args.smart_bob, args.fast_rollout)
    bob = bob_ty(bob_model, args, name='Bob')

    dialog = Dialog([alice, bob], args)
    logger = DialogLogger(verbose=args.verbose, log_file=args.log_file)
    ctx_gen = ContextGenerator(args.context_file)

    selfplay = SelfPlay(dialog, ctx_gen, args, logger)
    selfplay.run()
Example #22
#!/usr/bin/env python
import json
import base64
import pickle
import io
import sys
from flask import Flask, request, jsonify
import utils

app = Flask(__name__)
model = utils.load_model(sys.argv[1])


@app.route('/car-recognize', methods=['POST'])
def car_recognize():
    try:
        data = json.loads(request.data)
        preds = utils.inference(model, data['content'])
        response = {'probabilities': preds}
        return jsonify(response)

    except BaseException as err:
        print(err)
        raise


app.run()
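# Note: a hypothetical client call for the endpoint above, assuming the default
# Flask port and a base64-encoded image under the 'content' key (the exact
# payload format depends on utils.inference, which is not shown):
import base64
import json

import requests

with open('car.jpg', 'rb') as f:
    payload = {'content': base64.b64encode(f.read()).decode('ascii')}

resp = requests.post('http://localhost:5000/car-recognize', data=json.dumps(payload))
print(resp.json()['probabilities'])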
Example #23
                        help='how frequently output statistics')
    parser.add_argument("--data_path",
                        default='./data/mnist_prep',
                        help="path to processed data")
    parser.add_argument("--model_path",
                        default='./models',
                        help="save a trained model to this path")
    parser.add_argument('--log_path', default='./logs', help='path to logs')
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument("--sample_interval",
                        type=int,
                        default=400,
                        help="interval between image sampling")
    opt = parser.parse_args()

    train_data_s, train_data_t, test_data_s, test_data_t = joblib.load(
        os.path.join(opt.data_path, 'all_data.pkl'))

    opt.cuda = True if torch.cuda.is_available() else False
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    if opt.domain == 'source':
        model, metrics_dict = train(opt)
    elif opt.domain == 'target':
        model_s = load_model(opt.model_path, 'source')
        model, metrics_dict = train(opt, model_s)

    save_model(model, opt.model_path, opt.domain)
    save_metrics(metrics_dict, opt.log_path, opt.domain)
Example #24
def main():
    parser = argparse.ArgumentParser(description="-----[CNN-classifier]-----")
    parser.add_argument("--mode", default="train", help="train: train (with test) a model / test: test saved models")
    parser.add_argument("--model", default="rand", help="available models: rand, static, non-static, multichannel")
    parser.add_argument("--datafile", default="None", help="data file base to read in different datset (needs training, valid, and test files)")
    parser.add_argument("--dataset", default="TREC", help="available datasets: MR, TREC")
    parser.add_argument("--save_model", default=False, action='store_true', help="whether saving model or not")
    parser.add_argument("--early_stopping", default=False, action='store_true', help="whether to apply early stopping")
    parser.add_argument("--epoch", default=100, type=int, help="number of max epoch")
    parser.add_argument("--learning_rate", default=1.0, type=float, help="learning rate")
    parser.add_argument("--gpu", default=-1, type=int, help="the number of gpu to be used")

    options = parser.parse_args()
    if options.datafile == "None":
        data = getattr(utils, f"read_{options.dataset}")()
    else:
        data = utils.read_other(options.datafile)

    data["vocab"] = sorted(list(set([w for sent in data["train_x"] + data["dev_x"] + data["test_x"] for w in sent])))
    data["classes"] = sorted(list(set(data["train_y"])))
    data["word_to_idx"] = {w: i for i, w in enumerate(data["vocab"])}
    data["idx_to_word"] = {i: w for i, w in enumerate(data["vocab"])}

    params = {
        "MODEL": options.model,
        "DATASET": options.dataset,
        "DATAFILE": options.datafile,
        "SAVE_MODEL": options.save_model,
        "EARLY_STOPPING": options.early_stopping,
        "EPOCH": options.epoch,
        "LEARNING_RATE": options.learning_rate,
        "MAX_SENT_LEN": max([len(sent) for sent in data["train_x"] + data["dev_x"] + data["test_x"]]),
        "BATCH_SIZE": 50,
        "WORD_DIM": 300,
        "VOCAB_SIZE": len(data["vocab"]),
        "CLASS_SIZE": len(data["classes"]),
        "FILTERS": [3, 4, 5],
        "FILTER_NUM": [100, 100, 100],
        "DROPOUT_PROB": 0.5,
        "NORM_LIMIT": 3,
        "GPU": options.gpu
    }

    print("=" * 20 + "INFORMATION" + "=" * 20)
    print("MODEL:", params["MODEL"])
    if options.datafile == "None":
        print("DATASET:", params["DATASET"])
    else:
        print("DATAFILE:", params["DATAFILE"])
    print("VOCAB_SIZE:", params["VOCAB_SIZE"])
    print("EPOCH:", params["EPOCH"])
    print("LEARNING_RATE:", params["LEARNING_RATE"])
    print("EARLY_STOPPING:", params["EARLY_STOPPING"])
    print("SAVE_MODEL:", params["SAVE_MODEL"])
    print("=" * 20 + "INFORMATION" + "=" * 20)

    if options.mode == "train":
        print("=" * 20 + "TRAINING STARTED" + "=" * 20)
        model = train(data, params)
        if params["SAVE_MODEL"]:
            utils.save_model(model, params)
        print("=" * 20 + "TRAINING FINISHED" + "=" * 20)
    else:
        model = utils.load_model(params).cuda(params["GPU"])

        test_acc = test(data, model, params)
        print("test acc:", test_acc)
Example #25
def test(model_dir, datagen_test):
    model_tfs, model_bigwig_names, features, model = utils.load_model(model_dir)
    # val_samples must be an integer, hence the floor division
    model_predicts = model.predict_generator(datagen_test, val_samples=1 + len(datagen_test) // 100, pickle_safe=True, verbose=1)
    return model_predicts, model_tfs
Example #26
import utils
import os
import cv2
import sys
from collections import defaultdict, Counter

TOLERANCE = 0.35


def uuuh_stats(vals):
    if len(vals) == 0:
        return None
    return sum(vals) / len(vals), min(vals)


model_storage = utils.load_model("modelv2_testing.pkl")
tree_model = utils.TreeModel(model_storage)


def run_vote_with_distance(data, distance):
    data = [d[0] for d in data if d[1] <= distance]
    if len(data) == 0:
        return None, None

    vcount = len(data)
    counter = Counter(data)
    # Too many people found
    if len(counter) > 2:
        return None, None
    sorted_vote = sorted(counter.items(), key=lambda x: x[1], reverse=True)
    #print sorted_vote, vcount//2
def do_train(train_texts, train_labels, dev_texts, dev_labels, lstm_shape, lstm_settings,
    lstm_optimizer, batch_size=100,
    do_fit1=True, epochs1=5, model1_path=None, config1_path=None,
    do_fit2=False, epochs2=2, model2_path=None, config2_path=None,
    epoch_path=None, lstm_type=1):
    """Train a Keras model on the sentences in `train_texts`
        All the sentences in a text have the text's label
        do_fit1: Fit with frozen word embeddings
        do_fit2: Fit with unfrozen word embeddings (after fitting with frozen embeddings) at a lower
                learning rate
    """

    print('do_train: train_texts=%s dev_texts=%s' % (dim(train_texts), dim(dev_texts)))
    best_epoch_frozen, best_epoch_unfrozen = -1, -1

    n_train_sents = count_sentences(train_texts, batch_size, 'train')
    X_train, y_train = make_sentences(lstm_shape['max_length'], batch_size,
        train_texts, train_labels, 'train', n_train_sents)
    validation_data = None
    if dev_texts is not None:
        n_dev_sents = count_sentences(dev_texts, batch_size, 'dev')
        X_val, y_val = make_sentences(lstm_shape['max_length'], batch_size,
            dev_texts, dev_labels, 'dev', n_dev_sents)
        validation_data = (X_val, y_val)
    sentence_cache.flush()

    print("Loading spaCy")
    nlp = sentence_cache._load_nlp()
    embeddings = get_embeddings(nlp.vocab)
    model = build_lstm[lstm_type](embeddings, lstm_shape, lstm_settings)
    compile_lstm(model, lstm_settings['lr'])

    callback_list = None

    if do_fit1:
        if validation_data is not None:
            ra_val = RocAucEvaluation(validation_data=validation_data, interval=1, frozen=True,
                model_path=model1_path, config_path=config1_path)
            early = EarlyStopping(monitor='val_auc', mode='max', patience=2, verbose=1)
            callback_list = [ra_val, early]
        else:
            sae = SaveAllEpochs(model1_path, config1_path, epoch_path, True)
            if sae.last_epoch1() > 0:
                xprint('Reloading partially built model 1')
                get_embeds = partial(get_embeddings, vocab=nlp.vocab)
                model = load_model(model1_path, config1_path, True, get_embeds)
                compile_lstm(model, lstm_settings['lr'])
                epochs1 -= sae.last_epoch1()
            callback_list = [sae]

        if epochs1 > 0:
            model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs1,
                      validation_data=validation_data, callbacks=callback_list, verbose=1)
            if validation_data is not None:
                best_epoch_frozen = ra_val.best_epoch
                ra_val.best_epoch = -1
            else:
                save_model(model, model1_path, config1_path, True)

    if do_fit2:
        # Reload the best model so far, if it exists
        if os.path.exists(model1_path) and os.path.exists(config1_path):
            model = load_model(model1_path, config1_path, True,
                partial(get_embeddings, vocab=nlp.vocab))
        xprint("Unfreezing")
        for layer in model.layers:
            layer.trainable = True
        compile_lstm(model, lstm_settings['lr'] / 10)
        if validation_data is not None:
            # Reset early stopping
            ra_val = RocAucEvaluation(validation_data=validation_data, interval=1,
                frozen=False, was_frozen=True,
                get_embeddings=partial(get_embeddings, vocab=nlp.vocab),
                do_prime=True, model_path=model1_path, config_path=config1_path)
            early = EarlyStopping(monitor='val_auc', mode='max', patience=2, verbose=1)
            callback_list = [ra_val, early]
        else:
            sae = SaveAllEpochs(model2_path, config2_path, epoch_path, False)
            if sae.last_epoch2() > 0:
                xprint('Reloading partially built model 2')
                get_embeds = partial(get_embeddings, vocab=nlp.vocab)
                model = load_model(model2_path, config2_path, False)
                compile_lstm(model, lstm_settings['lr'] / 10)
                epochs2 -= sae.last_epoch2()
            callback_list = [sae]

        if epochs2 > 0:
            model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs2,
                      validation_data=validation_data, callbacks=callback_list, verbose=1)
            if validation_data is not None:
                best_epoch_unfrozen = ra_val.best_epoch
            else:
                save_model(model, model2_path, config2_path, False)

    del nlp
    return model, (best_epoch_frozen, best_epoch_unfrozen)
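
# A minimal usage sketch of the two-phase training above. The shape/settings keys
# and file names here are illustrative assumptions, not taken from the original code:
#
#   lstm_shape = {'max_length': 100}
#   lstm_settings = {'lr': 1e-3}
#   model, (best_frozen, best_unfrozen) = do_train(
#       train_texts, train_labels, dev_texts, dev_labels,
#       lstm_shape, lstm_settings, lstm_optimizer=None,
#       do_fit1=True, epochs1=5, model1_path='m1.h5', config1_path='m1.json',
#       do_fit2=True, epochs2=2, model2_path='m2.h5', config2_path='m2.json')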
예제 #28
# Define obss preprocessor

preprocess_obss = utils.ObssPreprocessor(save_dir, envs[0].observation_space)

# Load training status

try:
    status = utils.load_status(save_dir)
except OSError:
    status = {"num_frames": 0, "update": 0}

# Define actor-critic model

try:
    acmodel = utils.load_model(save_dir)
    logger.info("Model successfully loaded\n")
except OSError:
    acmodel = ACModel(preprocess_obss.obs_space, envs[0].action_space,
                      not args.no_instr, not args.no_mem)
    logger.info("Model successfully created\n")
logger.info("{}\n".format(acmodel))

if torch.cuda.is_available():
    acmodel.cuda()
logger.info("CUDA available: {}\n".format(torch.cuda.is_available()))

# Define actor-critic algo

if args.algo == "a2c":
    algo = torch_rl.A2CAlgo(envs, acmodel, args.frames_per_proc, args.discount,
예제 #29
    def forward(self, im_data):

        basefeat = self.RCNN_base(im_data)

        # feed the base feature map to the RPN to obtain RoIs
        rpn_feat = self.rpn(basefeat)

        rpn_cls_prob, rpn_bbox_pred = self.RCNN_rpn(rpn_feat)

        base_feat = self.sam([basefeat, rpn_feat])
        return [rpn_cls_prob, rpn_bbox_pred, base_feat]


net = _fasterRCNN()

net = load_model(
    net, "../snet_146_3/snet_146/pascal_voc_0712/thundernet_epoch_4.pth")
net.eval()
print('Finished loading model!')
print(net)
device = torch.device("cpu")
net = net.to(device)

##################export###############
output_onnx = 'thundernet146_rpn.onnx'
print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
input_names = ["input"]
# output_names = ["hm" , "wh"  , "reg"]
output_names = ["rpn_cls_prob", "rpn_bbox_pred", "base_feat"]
inputs = torch.randn(1, 3, 320, 320).to(device)
# The original snippet is truncated here; the remaining export arguments are an
# assumed completion using the names defined above (older torch.onnx._export API).
torch_out = torch.onnx._export(net,
                               inputs,
                               output_onnx,
                               export_params=True,
                               input_names=input_names,
                               output_names=output_names)

        for name in name_list:
            s = re.findall(r'\d+', os.path.basename(name))[0]
            epoch_list.append(int(s))

        epoch_list.sort()
        epoch_st = epoch_list[-1]

    if epoch_st > 0:
        print('=====================================================================')
        print('===> Resuming model from epoch %d' % epoch_st)
        print('=====================================================================')
        three_dim_model, fusion_model, FlowNet, optimizer = utils.load_model(
            three_dim_model, fusion_model, FlowNet, optimizer, opts, epoch_st)

    print(three_dim_model)

    num_params = utils.count_network_parameters(three_dim_model)

    print('\n=====================================================================')
    print("===> Model has %d parameters" % num_params)
    print('=====================================================================')

    loss_dir = os.path.join(opts.model_dir, 'loss')
    loss_writer = SummaryWriter(loss_dir)
예제 #31
def main(args):
    paddle.seed(12345)
    config = load_yaml(args.config_yaml)
    use_gpu = config.get("dygraph.use_gpu", False)
    test_data_dir = config.get("dygraph.test_data_dir", None)
    print_interval = config.get("dygraph.print_interval", None)
    model_load_path = config.get("dygraph.infer_load_path", "increment_rank")
    start_epoch = config.get("dygraph.infer_start_epoch", 3)
    end_epoch = config.get("dygraph.infer_end_epoch", 5)
    batch_size = config.get("dygraph.batch_size", 128)

    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    print("***********************************")
    logger.info(
        "use_gpu: {}, test_data_dir: {}, start_epoch: {}, end_epoch: {}, print_interval: {}, model_load_path: {}"
        .format(use_gpu, test_data_dir, start_epoch, end_epoch, print_interval,
                model_load_path))
    print("***********************************")

    rank_model = create_model(config)
    file_list = [
        os.path.join(test_data_dir, x) for x in os.listdir(test_data_dir)
    ]
    print("read data")
    dataset = MovieDataset(file_list)
    test_dataloader = create_data_loader(dataset, place=place, config=config)

    epoch_begin = time.time()
    interval_begin = time.time()

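    # Note: range(start_epoch + 1, end_epoch) scores epochs start_epoch+1 .. end_epoch-1;
    # with the defaults above (3 and 5), only the epoch-4 checkpoint is evaluated.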
    for epoch_id in range(start_epoch + 1, end_epoch):

        logger.info("load model epoch {}".format(epoch_id))
        model_path = os.path.join(model_load_path, str(epoch_id))
        load_model(model_path, rank_model)
        runner_results = []
        for batch_id, batch in enumerate(test_dataloader()):
            batch_runner_result = {}

            user_sparse_inputs, mov_sparse_inputs, label_input = create_feeds(
                batch)

            predict = rank_model(batch_size, user_sparse_inputs,
                                 mov_sparse_inputs, label_input)

            uid = user_sparse_inputs[0]
            movieid = mov_sparse_inputs[0]
            label = label_input

            if batch_id % print_interval == 0:
                logger.info(
                    "infer epoch: {}, batch_id: {}, uid: {}, movieid: {}, label: {}, predict: {},speed: {:.2f} ins/s"
                    .format(
                        epoch_id, batch_id, uid.numpy(), movieid.numpy(),
                        label.numpy(), predict.numpy(), print_interval *
                        batch_size / (time.time() - interval_begin)))
                interval_begin = time.time()

            batch_runner_result["userid"] = uid.numpy().tolist()
            batch_runner_result["movieid"] = movieid.numpy().tolist()
            batch_runner_result["label"] = label.numpy().tolist()
            batch_runner_result["predict"] = predict.numpy().tolist()
            runner_results.append(batch_runner_result)

        logger.info("infer epoch: {} done, epoch time: {:.2f} s".format(
            epoch_id,
            time.time() - epoch_begin))

        runner_result_save_path = config.get("dygraph.runner_result_dump_path",
                                             None)
        if runner_result_save_path:
            logger.info(
                "Dump runner result in {}".format(runner_result_save_path))
            with open(runner_result_save_path, 'w+') as fout:
                json.dump(runner_results, fout)
예제 #32
                                 batch_size=BATCH_SIZE,
                                 sampler=sampler_test,
                                 num_workers=N_WORKERS,
                                 pin_memory=True)

    # Create model
    model = ModelFactory.create(config)
    model.to(device, dtype=dtype)
    lr = float(LEARNING_RATE)
    optimizer = model.get_optimizer(model, lr)
    lr_scheduler = model.get_lr_scheduler(optimizer)

    if DPATH_LOAD_CKPT:
        if not fpath_load_ckpt:
            fpath_load_ckpt = get_ckpt(DPATH_LOAD_CKPT, LOAD_POLICY)
        load_model(fpath_load_ckpt, model)
        print("[%s]" % LOAD_POLICY.upper(), fpath_load_ckpt, "has been loaded...")
    model = nn.DataParallel(model)

    loss_ce = nn.CrossEntropyLoss()   
    def classification_loss(logits, target_labels):
        return loss_ce(logits, target_labels)

    def classify(model, batch):
        covers, stegos = batch
        
        n_half = covers.shape[0]
        
        # Shuffle indices
        ind_base = np.arange(0, 2*n_half, 2, dtype=np.int32)
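        # Even indices presumably pick the covers out of an interleaved
        # cover/stego batch, keeping each cover aligned with its stego pair.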
예제 #33
def test(model,
         saver,
         sess,
         exp_string,
         data_generator,
         test_num_updates=None):
    num_classes = data_generator.num_classes  # for classification, 1 otherwise

    np.random.seed(1)
    random.seed(1)

    steps = range(1000, 61000, 1000)
    accs = []

    inner_loops = 5
    for step in steps:
        print(f"Load model {step}")
        load_model(FLAGS.logdir, exp_string, saver, sess, step)

        metaval_accuracies = []

        for _ in range(NUM_TEST_POINTS):
            if 'generate' not in dir(data_generator):
                feed_dict = {model.meta_lr: 0.0}
            else:
                batch_x, batch_y, amp, phase = data_generator.generate(
                    train=False)

                if FLAGS.baseline == 'oracle':  # NOTE - this flag is specific to sinusoid
                    batch_x = np.concatenate([
                        batch_x,
                        np.zeros([batch_x.shape[0], batch_x.shape[1], 2])
                    ], 2)
                    batch_x[0, :, 1] = amp[0]
                    batch_x[0, :, 2] = phase[0]

                inputa = batch_x[:, :num_classes * FLAGS.update_batch_size, :]
                inputb = batch_x[:, num_classes * FLAGS.update_batch_size:, :]
                labela = batch_y[:, :num_classes * FLAGS.update_batch_size, :]
                labelb = batch_y[:, num_classes * FLAGS.update_batch_size:, :]

                feed_dict = {
                    model.inputa: inputa,
                    model.inputb: inputb,
                    model.labela: labela,
                    model.labelb: labelb,
                    model.meta_lr: 0.0
                }

            if model.classification:
                result = sess.run([model.metaval_total_accuracy1] +
                                  model.metaval_total_accuracies2, feed_dict)
            else:  # this is for sinusoid
                result = sess.run([model.total_loss1] + model.total_losses2,
                                  feed_dict)
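            # result[0] is the pre-update metric; result[inner_loops] is the
            # metric after `inner_loops` inner-gradient updates.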
            metaval_accuracies.append(result[inner_loops])

        accs.append(np.array(metaval_accuracies))

    plot_performance(steps, accs, NUM_TEST_POINTS)
예제 #34
            if punctuation.strip() == "":
                sys.stdout.write("%s%s%s" % (punctuation, tagstring, word))
            else:
                sys.stdout.write("%s %s%s" % (punctuation[:1], tagstring, word))

            first_word = False

        else:
            if is_word(token):
                word = token
            else:
                tags.append(token)

    sys.stdout.write("\n")
    sys.stdout.flush()

if __name__ == "__main__":
    
    assert len(sys.argv) > 1, "Give model path as first argument"

    model_path = sys.argv[1]
    net = utils.load_model(model_path)
    net.batch_size = 1

    punctuation_reverse_map = utils.get_reverse_map(net.out_vocabulary)

    for line in iter(sys.stdin.readline, ""):
        net.reset_state()
        write_punctuations(net, punctuation_reverse_map, line)
    
예제 #35
def main():
    if FLAGS.datasource == 'sinusoid':
        if FLAGS.train:
            test_num_updates = 5
        else:
            test_num_updates = 10
    elif FLAGS.datasource == 'miniimagenet':
        if FLAGS.train:
            test_num_updates = 1  # eval on at least one update during training
        else:
            test_num_updates = 10
    else:
        test_num_updates = 10

    if not FLAGS.train:
        orig_meta_batch_size = FLAGS.meta_batch_size
        # always use meta batch size of 1 when testing.
        FLAGS.meta_batch_size = 1

    if FLAGS.datasource == 'sinusoid':
        data_generator = DataGenerator(FLAGS.update_batch_size * 2,
                                       FLAGS.meta_batch_size)
    elif FLAGS.metatrain_iterations == 0 and FLAGS.datasource == 'miniimagenet':
        assert FLAGS.meta_batch_size == 1
        assert FLAGS.update_batch_size == 1
        data_generator = DataGenerator(
            1, FLAGS.meta_batch_size)  # only use one datapoint to save memory
    elif FLAGS.datasource == 'miniimagenet':  # TODO - use 15 val examples for imagenet?
        if FLAGS.train:
            data_generator = DataGenerator(
                FLAGS.update_batch_size + 15, FLAGS.meta_batch_size
            )  # use 15 extra val examples per class during training
        else:
            data_generator = DataGenerator(
                FLAGS.update_batch_size * 2, FLAGS.meta_batch_size
            )  # only use one datapoint per class for testing to save memory
    else:
        data_generator = DataGenerator(
            FLAGS.update_batch_size * 2, FLAGS.meta_batch_size)

    dim_output = data_generator.dim_output
    if FLAGS.baseline == 'oracle':
        assert FLAGS.datasource == 'sinusoid'
        dim_input = 3
        FLAGS.pretrain_iterations += FLAGS.metatrain_iterations
        FLAGS.metatrain_iterations = 0
    else:
        dim_input = data_generator.dim_input

    if FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'omniglot':
        tf_data_load = True
        num_classes = data_generator.num_classes

        if FLAGS.train:  # only construct training model if needed
            random.seed(5)
            image_tensor, label_tensor = data_generator.make_data_tensor()
            inputa = tf.slice(image_tensor, [0, 0, 0],
                              [-1, num_classes * FLAGS.update_batch_size, -1])
            inputb = tf.slice(image_tensor,
                              [0, num_classes * FLAGS.update_batch_size, 0],
                              [-1, -1, -1])
            labela = tf.slice(label_tensor, [0, 0, 0],
                              [-1, num_classes * FLAGS.update_batch_size, -1])
            labelb = tf.slice(label_tensor,
                              [0, num_classes * FLAGS.update_batch_size, 0],
                              [-1, -1, -1])
            input_tensors = {
                'inputa': inputa,
                'inputb': inputb,
                'labela': labela,
                'labelb': labelb
            }

        random.seed(6)
        image_tensor, label_tensor = data_generator.make_data_tensor(
            train=False,
            shuffle=False,
            analysis=FLAGS.analyze,
            points_to_analyze=FLAGS.points_to_analyze)
        inputa = tf.slice(image_tensor, [0, 0, 0],
                          [-1, num_classes * FLAGS.update_batch_size, -1])
        inputb = tf.slice(image_tensor,
                          [0, num_classes * FLAGS.update_batch_size, 0],
                          [-1, -1, -1])
        labela = tf.slice(label_tensor, [0, 0, 0],
                          [-1, num_classes * FLAGS.update_batch_size, -1])
        labelb = tf.slice(label_tensor,
                          [0, num_classes * FLAGS.update_batch_size, 0],
                          [-1, -1, -1])
        metaval_input_tensors = {
            'inputa': inputa,
            'inputb': inputb,
            'labela': labela,
            'labelb': labelb
        }

    else:
        tf_data_load = False
        input_tensors = None

    model = MAML(dim_input, dim_output, test_num_updates=test_num_updates)
    if FLAGS.train or not tf_data_load:
        model.construct_model(input_tensors=input_tensors, prefix='metatrain_')
    if tf_data_load:
        model.construct_model(input_tensors=metaval_input_tensors,
                              prefix='metaval_')
    model.summ_op = tf.summary.merge_all()

    saver = loader = tf.train.Saver(
        tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=100)

    sess = tf.InteractiveSession()

    if not FLAGS.train:
        # change to original meta batch size after loading model.
        FLAGS.meta_batch_size = orig_meta_batch_size

    if FLAGS.train_update_batch_size == -1:
        FLAGS.train_update_batch_size = FLAGS.update_batch_size
    if FLAGS.train_update_lr == -1:
        FLAGS.train_update_lr = FLAGS.update_lr

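    # exp_string encodes the hyperparameter settings into the checkpoint
    # directory name so runs with different settings don't collide.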
    exp_string = 'cls_' + str(FLAGS.num_classes) + '.mbs_' + str(
        FLAGS.meta_batch_size) + '.ubs_' + str(
            FLAGS.train_update_batch_size) + '.numstep' + str(
                FLAGS.num_updates) + '.updatelr' + str(FLAGS.train_update_lr)

    if FLAGS.num_filters != 64:
        exp_string += 'hidden' + str(FLAGS.num_filters)
    if FLAGS.max_pool:
        exp_string += 'maxpool'
    if FLAGS.stop_grad:
        exp_string += 'stopgrad'
    if FLAGS.baseline:
        exp_string += FLAGS.baseline
    if FLAGS.norm == 'batch_norm':
        exp_string += 'batchnorm'
    elif FLAGS.norm == 'layer_norm':
        exp_string += 'layernorm'
    elif FLAGS.norm == 'None':
        exp_string += 'nonorm'
    else:
        print('Norm setting not recognized.')

    resume_itr = 1  # We start with 1
    model_file = None

    tf.global_variables_initializer().run()
    tf.train.start_queue_runners()

    load_model(FLAGS.logdir, exp_string, saver, sess, FLAGS.test_iter)

    if FLAGS.train:
        train(model, saver, sess, exp_string, data_generator, resume_itr)
    else:
        if FLAGS.analyze:
            analyze(model,
                    saver,
                    sess,
                    exp_string,
                    data_generator,
                    test_num_updates,
                    FLAGS.points_to_analyze,
                    base_analysis=FLAGS.base_analysis,
                    steps=interpret_steps(FLAGS.steps_to_analyze))
        else:
            test(model, saver, sess, exp_string, data_generator,
                 test_num_updates)
예제 #36
    def load(self, model):
        self.t_lstm = load_model(model["t_lstm_file_name"])
        self.in_vocabulary = self.t_lstm.in_vocabulary
        super(TA_LSTM, self).load(model)

def main():
    parser = argparse.ArgumentParser(description='Reinforce')
    parser.add_argument('--data', type=str, default=config.data_dir,
        help='location of the data corpus')
    parser.add_argument('--unk_threshold', type=int, default=config.unk_threshold,
        help='minimum word frequency to be in dictionary')
    parser.add_argument('--alice_model_file', type=str,
        help='Alice model file')
    parser.add_argument('--bob_model_file', type=str,
        help='Bob model file')
    parser.add_argument('--output_model_file', type=str,
        help='output model file')
    parser.add_argument('--context_file', type=str,
        help='context file')
    parser.add_argument('--temperature', type=float, default=config.temperature,
        help='temperature')
    parser.add_argument('--cuda', action='store_true', default=config.cuda,
        help='use CUDA')
    parser.add_argument('--verbose', action='store_true', default=False,
        help='print out conversations')
    parser.add_argument('--seed', type=int, default=config.seed,
        help='random seed')
    parser.add_argument('--score_threshold', type=int, default=6,
        help='successful dialog should have more than score_threshold in score')
    parser.add_argument('--log_file', type=str, default='',
        help='log successful dialogs to file for training')
    parser.add_argument('--smart_bob', action='store_true', default=False,
        help='make Bob smart again')
    parser.add_argument('--gamma', type=float, default=0.95,
        help='discount factor')
    parser.add_argument('--eps', type=float, default=0.0,
        help='eps greedy')
    parser.add_argument('--nesterov', action='store_true', default=config.nesterov,
        help='enable nesterov momentum')
    parser.add_argument('--momentum', type=float, default=0.1,
        help='momentum for sgd')
    parser.add_argument('--lr', type=float, default=0.5,
        help='learning rate')
    parser.add_argument('--clip', type=float, default=1,
        help='gradient clip')
    parser.add_argument('--rl_lr', type=float, default=0.2,
        help='RL learning rate')
    parser.add_argument('--rl_clip', type=float, default=1,
        help='RL gradient clip')
    parser.add_argument('--ref_text', type=str,
        help='file with the reference text')
    parser.add_argument('--bsz', type=int, default=16,
        help='batch size')
    parser.add_argument('--sv_train_freq', type=int, default=4,
        help='supervision train frequency')
    parser.add_argument('--nepoch', type=int, default=4,
        help='number of epochs')
    parser.add_argument('--visual', action='store_true', default=False,
        help='plot graphs')
    parser.add_argument('--domain', type=str, default=config.domain,
        help='domain for the dialogue')

    ############ Minhao ###############
    parser.add_argument('--max_turns', type=int, default=20,
        help='max_turns in each dialogue')
    parser.add_argument('--rl_bob', action='store_true', default=False,
        help='make Bob an RL agent too')
    parser.add_argument('--fixed_bob', action='store_true', default=False,
        help='keep Bob fixed (no parameter updates)')
    args = parser.parse_args()

    device_id = utils.use_cuda(args.cuda)
    logging.info("Starting training using pytorch version:%s" % (str(torch.__version__)))
    logging.info("CUDA is %s" % ("enabled. Using device_id:"+str(device_id) + " version:" \
        +str(torch.version.cuda) + " on gpu:" + torch.cuda.get_device_name(0) if args.cuda else "disabled"))

    alice_model = utils.load_model(args.alice_model_file)
    # we don't want to use Dropout during RL
    alice_model.eval()
    # Alice is a RL based agent, meaning that she will be learning while selfplaying
    logging.info("Creating RlAgent from alice_model: %s" % (args.alice_model_file))
    alice = RlAgent(alice_model, args, name='Alice')

    # we keep Bob frozen, i.e. we don't update his parameters
    logging.info("Creating Bob's (--smart_bob) RLAgent" if args.rl_bob \
        else "Creating Bob's (not --smart_bob) LstmAgent" )
    
    #bob_ty = RlAgent if args.rl_bob else LstmAgent
    bob_ty = LstmRolloutAgent if args.smart_bob else LstmAgent
    bob_model = utils.load_model(args.bob_model_file)
    bob_model.eval()
    bob = bob_ty(bob_model, args, name='Bob')

    logging.info("Initializing communication dialogue between Alice and Bob")
    dialog = Dialog([alice, bob], args)
    logger = DialogLogger(verbose=args.verbose, log_file=args.log_file)
    ctx_gen = ContextGenerator(args.context_file)

    logging.info("Building word corpus, requiring minimum word frequency of %d for dictionary" % (args.unk_threshold))
    corpus = data.WordCorpus(args.data, freq_cutoff=args.unk_threshold)
    engine = Engine(alice_model, args, device_id, verbose=False)

    logging.info("Starting Reinforcement Learning")
    reinforce = Reinforce(dialog, ctx_gen, args, engine, corpus, logger)
    reinforce.run()

    logging.info("Saving updated Alice model to %s" % (args.output_model_file))
    utils.save_model(alice.model, args.output_model_file)
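
# Hypothetical invocation (script name and paths are placeholders, not from the original repo):
#   python reinforce.py --alice_model_file alice.pt --bob_model_file bob.pt \
#       --context_file data/contexts.txt --output_model_file alice_rl.pt --cuda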
예제 #38
def main(inputs_path, output_obj, base_paths=None, meta_path=None,
         outfile_params=None):
    """
    Parameters
    ----------
    inputs_path : str
        File path for Galaxy parameters

    output_obj : str
        File path for ensemble estimator output

    base_paths : str
        File path or paths concatenated by comma.

    meta_path : str
        File path

    outfile_params : str
        File path for params output
    """
    with open(inputs_path, 'r') as param_handler:
        params = json.load(param_handler)

    base_estimators = []
    for idx, base_file in enumerate(base_paths.split(',')):
        if base_file and base_file != 'None':
            with open(base_file, 'rb') as handler:
                model = load_model(handler)
        else:
            estimator_json = (params['base_est_builder'][idx]
                              ['estimator_selector'])
            model = get_estimator(estimator_json)
        base_estimators.append(model)

    if meta_path:
        with open(meta_path, 'rb') as f:
            meta_estimator = load_model(f)
    else:
        estimator_json = params['meta_estimator']['estimator_selector']
        meta_estimator = get_estimator(estimator_json)

    options = params['algo_selection']['options']

    cv_selector = options.pop('cv_selector', None)
    if cv_selector:
        splitter, groups = get_cv(cv_selector)
        options['cv'] = splitter
        # set n_jobs
        options['n_jobs'] = N_JOBS

    if params['algo_selection']['estimator_type'] == 'StackingCVClassifier':
        ensemble_estimator = StackingCVClassifier(
            classifiers=base_estimators,
            meta_classifier=meta_estimator,
            **options)

    elif params['algo_selection']['estimator_type'] == 'StackingClassifier':
        ensemble_estimator = StackingClassifier(
            classifiers=base_estimators,
            meta_classifier=meta_estimator,
            **options)

    elif params['algo_selection']['estimator_type'] == 'StackingCVRegressor':
        ensemble_estimator = StackingCVRegressor(
            regressors=base_estimators,
            meta_regressor=meta_estimator,
            **options)

    else:
        ensemble_estimator = StackingRegressor(
            regressors=base_estimators,
            meta_regressor=meta_estimator,
            **options)

    print(ensemble_estimator)
    for base_est in base_estimators:
        print(base_est)

    with open(output_obj, 'wb') as out_handler:
        pickle.dump(ensemble_estimator, out_handler, pickle.HIGHEST_PROTOCOL)

    if params['get_params'] and outfile_params:
        results = get_search_params(ensemble_estimator)
        df = pd.DataFrame(results, columns=['', 'Parameter', 'Value'])
        df.to_csv(outfile_params, sep='\t', index=False)
예제 #39
    def test(dataset):
        # load BERT and GAN
        load_gan_model(D, G, config['gan_save_path'])
        if args.fine_tune:
            load_model(E, path=config['bert_save_path'], model_name='bert')

        test_dataloader = DataLoader(dataset, batch_size=args.predict_batch_size, shuffle=False, num_workers=2)
        n_sample = len(test_dataloader)
        result = dict()

        # Loss function
        detection_loss = torch.nn.BCELoss().to(device)
        classified_loss = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)

        G.eval()
        D.eval()
        E.eval()

        all_detection_preds = []
        all_class_preds = []
        all_features = []

        for sample in tqdm.tqdm(test_dataloader):
            sample = (i.to(device) for i in sample)
            token, mask, type_ids, y = sample
            batch = len(token)

            # -------------------------evaluate D------------------------- #
            # BERT encode sentence to feature vector

            with torch.no_grad():
                sequence_output, pooled_output = E(token, mask, type_ids)
                real_feature = pooled_output

                # n_class > 2 means a classifier is trained in addition to the discriminator
                if n_class > 2:
                    f_vector, discriminator_output, classification_output = D(real_feature, return_feature=True)
                    all_detection_preds.append(discriminator_output)
                    all_class_preds.append(classification_output)

                # only run the discriminator
                else:
                    f_vector, discriminator_output = D.detect_only(real_feature, return_feature=True)
                    all_detection_preds.append(discriminator_output)
                if args.do_vis:
                    all_features.append(f_vector)

        all_y = LongTensor(dataset.dataset[:, -1].astype(int)).cpu()  # [length]
        all_binary_y = (all_y != 0).long()  # [length]; label 0 is oos
        all_detection_preds = torch.cat(all_detection_preds, 0).cpu()  # [length, 1]
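        # binarize the detector scores (presumably thresholded at 0.5) for the metrics below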
        all_detection_binary_preds = convert_to_int_by_threshold(all_detection_preds.squeeze())  # [length, 1]

        # compute losses
        detection_loss = detection_loss(all_detection_preds, all_binary_y.float())
        result['detection_loss'] = detection_loss

        if n_class > 2:
            class_one_hot_preds = torch.cat(all_class_preds, 0).detach().cpu()  # one hot label
            class_loss = classified_loss(class_one_hot_preds, all_y)  # compute loss
            all_class_preds = torch.argmax(class_one_hot_preds, 1)  # label
            class_acc = metrics.ind_class_accuracy(all_class_preds, all_y, oos_index=0)  # accuracy for ind class
            logger.info(metrics.classification_report(all_y, all_class_preds, target_names=processor.id_to_label))

        # logger.info(metrics.classification_report(all_binary_y, all_detection_binary_preds, target_names=['oos', 'in']))

        # report
        oos_ind_precision, oos_ind_recall, oos_ind_fscore, _ = metrics.binary_recall_fscore(all_detection_binary_preds, all_binary_y)
        detection_acc = metrics.accuracy(all_detection_binary_preds, all_binary_y)

        y_score = all_detection_preds.squeeze().tolist()
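        # EER: the operating point where false-accept and false-reject rates are equal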
        eer = metrics.cal_eer(all_binary_y, y_score)

        result['eer'] = eer
        result['all_detection_binary_preds'] = all_detection_binary_preds
        result['detection_acc'] = detection_acc
        result['all_binary_y'] = all_binary_y
        result['all_y'] = all_y
        result['oos_ind_precision'] = oos_ind_precision
        result['oos_ind_recall'] = oos_ind_recall
        result['oos_ind_f_score'] = oos_ind_fscore
        result['score'] = y_score
        result['y_score'] = y_score
        result['auc'] = roc_auc_score(all_binary_y, y_score)
        if n_class > 2:
            result['class_loss'] = class_loss
            result['class_acc'] = class_acc
        if args.do_vis:
            all_features = torch.cat(all_features, 0).cpu().numpy()
            result['all_features'] = all_features

        freeze_data['test_all_y'] = all_y.tolist()
        freeze_data['test_all_pred'] = all_detection_binary_preds.tolist()
        freeze_data['test_score'] = y_score

        return result
예제 #40
    selected_folder = os.path.join(video_folder,"selected")
    window_folder = os.path.join(video_folder,"windows")


    print("STEP 1: Frames extraction...")
    image_list,ntotal = extract_frames_from_video(args.path,allframes_folder,skip=1,extension=configuration.extension)
    n = len(image_list)
    
    # patch: optionally limit processing to the first 100 frames
    #image_list = image_list[:100]
    #n = len(image_list)
    
    print("{} frames extracted".format(len(image_list)))

    #load the models
    binary_model = utils.load_model(configuration.model.classifier,configuration.model.classifier_weights)        
    window_model = utils.load_model(configuration.model.window,configuration.model.window_weights)       
     
    all_windows = range(configuration.window.start,configuration.window.end+1)

    images_ok = {}
    for i in range(configuration.window.start,configuration.window.end+1):
        images_ok[i] = []

    # classify every configuration.frame_step-th frame
    print("STEP 2: Preliminary Classification...")
    for i in range(0,n,configuration.frame_step):
        image_name = image_list[i]
        print("processing image",i+1,image_name)
        
        #load the image
예제 #41
def run_model(model_name, data_dict, cuda):
    print("running ", model_name)
    if model_name not in IMPLEMENTED_MODELS:
        raise NotImplementedError(
            "You must choose one of these: {}".format(IMPLEMENTED_MODELS))
    else:

        emb_matrix = data_dict['emb_matrix']
        train_batches = data_dict['train_batches']
        val_batches = data_dict['val_batches']
        test_batches = data_dict['test_batches']
        set_seeds(SEED)

        harassment_f1_scores = []
        harassment_recall_scores = []
        harassment_precision_scores = []

        indirect_f1_scores = []
        indirect_recall_scores = []
        indirect_precision_scores = []

        sexual_f1_scores = []
        sexual_recall_scores = []
        sexual_precision_scores = []

        physical_f1_scores = []
        physical_recall_scores = []
        physical_precision_scores = []

        runs = CONFIG['iterations']
        for i in range(1, runs + 1):
            print("***** iteration: ", i)
            if model_name == "vanilla_projected_last":
                model = ProjectedVanillaRNN(
                    emb_matrix, embeddings_dropout=CONFIG['dropout'])
            elif model_name == "vanilla_projected_avg":
                model = ProjectedVanillaRNN(
                    emb_matrix,
                    avg_pooling=True,
                    embeddings_dropout=CONFIG['dropout'])
            elif model_name == "multi_attention":
                model = MultiAttentionRNN(
                    emb_matrix,
                    embeddings_dropout=CONFIG['dropout'],
                    trainable_embeddings=CONFIG['trainable_embeddings'])
            elif model_name == "multi_projected_attention":
                model = ProjectedMultiAttentionRNN(
                    emb_matrix, embeddings_dropout=CONFIG['dropout'])
            elif model_name == "attention":
                model = AttentionRNN(
                    emb_matrix,
                    embeddings_dropout=CONFIG['dropout'],
                    trainable_embeddings=CONFIG['trainable_embeddings'])
            elif model_name == "projected_attention":
                model = ProjectedAttentionRNN(
                    emb_matrix, embeddings_dropout=CONFIG['dropout'])
            elif model_name == "vanilla_avg":
                model = VanillaRnn(
                    emb_matrix,
                    avg_pooling=True,
                    embeddings_dropout=CONFIG['dropout'],
                    trainable_embeddings=CONFIG['trainable_embeddings'])

            else:
                model = VanillaRnn(
                    emb_matrix,
                    embeddings_dropout=CONFIG['dropout'],
                    trainable_embeddings=CONFIG['trainable_embeddings'])
            optimizer = Adam(model.params, CONFIG['lr'])
            criterion = BCEWithLogitsLoss()
            train(model=model,
                  train_batches=train_batches,
                  test_batches=val_batches,
                  optimizer=optimizer,
                  criterion=criterion,
                  epochs=CONFIG['epochs'],
                  init_patience=CONFIG['patience'],
                  cuda=cuda)
            model = load_model(model)
            d = generate_results(model, test_batches, cuda)
            df = generate_test_submission_values(
                harassment_dict=d['harassment'],
                sexual_dict=d['sexual'],
                physical_dict=d['physical'],
                indirect_dict=d['indirect'])
            df_results = pd.read_csv(TEST_RESULTS)
            harassment_f1_scores.append(
                f1_score(df_results.harassment.values, df.Harassment.values))
            harassment_precision_scores.append(
                precision_score(df_results.harassment.values,
                                df.Harassment.values))
            harassment_recall_scores.append(
                recall_score(df_results.harassment.values,
                             df.Harassment.values))

            indirect_f1_scores.append(
                f1_score(df_results.IndirectH.values, df.IndirectH.values))
            indirect_precision_scores.append(
                precision_score(df_results.IndirectH.values,
                                df.IndirectH.values))
            indirect_recall_scores.append(
                recall_score(df_results.IndirectH.values, df.IndirectH.values))

            sexual_f1_scores.append(
                f1_score(df_results.SexualH.values, df.SexualH.values))
            sexual_precision_scores.append(
                precision_score(df_results.SexualH.values, df.SexualH.values))
            sexual_recall_scores.append(
                recall_score(df_results.SexualH.values, df.SexualH.values))

            physical_f1_scores.append(
                f1_score(df_results.PhysicalH.values, df.PhysicalH.values))
            physical_precision_scores.append(
                precision_score(df_results.PhysicalH.values,
                                df.PhysicalH.values))
            physical_recall_scores.append(
                recall_score(df_results.PhysicalH.values, df.PhysicalH.values))

        results_dict = {
            'model': [model_name for _ in range(runs)],
            'harassment_f1_score': harassment_f1_scores,
            'harassment_recall': harassment_recall_scores,
            'harassment_precision': harassment_precision_scores,
            'indirect_f1_score': indirect_f1_scores,
            'indirect_recall': indirect_recall_scores,
            'indirect_precision': indirect_precision_scores,
            'sexual_f1_score': sexual_f1_scores,
            'sexual_recall': sexual_recall_scores,
            'sexual_precision': sexual_precision_scores,
            'physical_f1_score': physical_f1_scores,
            'physical_recall': physical_recall_scores,
            'physical_precision': physical_precision_scores
        }
        df = pd.DataFrame.from_dict(results_dict)
        if "results.csv" in os.listdir(RESULTS_DIR):
            df_old = pd.read_csv(RESULTS_DIR + "results.csv")
            df = pd.concat([df_old, df])
        df.to_csv(RESULTS_DIR + "results.csv", index=False)
예제 #42
#!/usr/bin/python
from utils import load_model, load_latest_model
from text_generation import generate_and_print, encode

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model_path", help="directory where the model was saved")
parser.add_argument("--seed", help="seed for generator")
parser.add_argument("--chars", type=int,  help="how many characters to generate")
parser.add_argument("--diversity", type=float,  help="")
parser.add_argument("--load_latest", action='store_true',
                    help='if true looks for the latest epoch_xxxxx subdirectory '
                         'and loads the model from there. Otherwise looks directly in the'
                         'given directory.')
parser.add_argument("--out_file", type=str, help="where to put the output")
args = parser.parse_args()

if args.load_latest:
    print 'loading latest'
    model, _ = load_latest_model(args.model_path)
else:
    model = load_model(args.model_path)

encoded_text = encode(unicode(args.seed, encoding='utf8'))
generated = generate_and_print(model, encoded_text, args.diversity, args.chars)

if args.out_file is not None:
    with open(args.out_file, 'wb') as out:
        out.write(generated)
    @classmethod  # implied by the `cls` first parameter
    def load(cls, nlp, model_path, config_path, frozen, methods, max_length):
        xprint('SentimentAnalyser.load: model_path=%s config_path=%s frozen=%s methods=%s max_length=%d' % (
            model_path, config_path, frozen, methods, max_length))
        get_embeds = partial(get_embeddings, vocab=nlp.vocab) if frozen else None
        model = load_model(model_path, config_path, frozen, get_embeds)
        return cls(model, methods=methods, max_length=max_length)

		noise_size=noise_size,output_size=num_classes,vocab=vocab,SOS_TOKEN=SOS_TOKEN,beam_width=config["model_config"]["generator"]["beam_width"]).to(device)
elif "hidden_size" in config["model_config"]["generator"] and "sim_size" in config["model_config"]["generator"] and \
"similarity" in config["model_config"]["generator"]:
	generator = getattr(generators,config["model_config"]["generator"]["name"])(hidden_size=config["model_config"]["generator"]["hidden_size"],
		noise_size=noise_size,output_size=num_classes,max_seq_len=max_seq_len,sim_size=config["model_config"]["generator"]["sim_size"],
		similarity=getattr(nn,config["model_config"]["generator"]["similarity"])(dim=-1),vocab=vocab,SOS_TOKEN=SOS_TOKEN,beam_width=config["model_config"]["generator"]["beam_width"]).to(device)
elif "TransformerGenerator" in config["model_config"]["generator"]["name"]:
	generator = getattr(generators,config["model_config"]["generator"]["name"])(hidden_size=config["model_config"]["generator"]["hidden_size"],
		num_heads=config["model_config"]["generator"]["num_heads"],noise_size=noise_size,output_size=num_classes,
		num_layers=config["model_config"]["generator"]["num_layers"],max_seq_len=max_seq_len,d_ff=config["model_config"]["generator"]["d_ff"],
		vocab=vocab,SOS_TOKEN=SOS_TOKEN,PAD_TOKEN=PAD_TOKEN,beam_width=config["model_config"]["generator"]["beam_width"]).to(device)
else:
	generator = getattr(generators,config["model_config"]["generator"]["name"])(hidden_size=config["model_config"]["generator"]["hidden_size"],
		noise_size=noise_size,output_size=num_classes,vocab=vocab,SOS_TOKEN=SOS_TOKEN,beam_width=config["model_config"]["generator"]["beam_width"]).to(device)

load_model(generator,summary_path)

text_log = open(os.path.join(summary_path,"eval_log.txt"),"a")

# losses
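# Zero the weights on SOS/PAD so those special tokens contribute nothing to the NLL loss.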
loss_weight = torch.ones(num_classes).to(device)
loss_weight[SOS_TOKEN] = 0.0
#loss_weight[EOS_TOKEN] = 0.0
#loss_weight[UNK_TOKEN] = 0.0
loss_weight[PAD_TOKEN] = 0.0
pretrain_loss_fun = nn.NLLLoss(weight=loss_weight)

np_g = num_parameters(generator)
text_log.write("Number of parameters for G: {}\n"
	  .format(np_g))
예제 #45
def main():
    tf.set_random_seed(1234)  # for producing the same images

    if not hasattr(keras.backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")

    if keras.backend.image_dim_ordering() != 'tf':
        keras.backend.set_image_dim_ordering('tf')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
              "'th', temporarily setting to 'tf'")

    sess = tf.Session()
    keras.backend.set_session(sess)

    # load and preprocess dataset
    data_spec = DataSpec(batch_size=TOT_IMAGES,
                         scale_size=256,
                         crop_size=224,
                         isotropic=False)
    image_producer = ImageNetProducer(data_path=INPUT_DIR,
                                      num_images=TOT_IMAGES,
                                      data_spec=data_spec,
                                      batch_size=TOT_IMAGES)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
    y = tf.placeholder(tf.float32, shape=(None, 1000))
    class_num = 1000

    # load target model and produce data
    # model = preprocess layer + pretrained model
    from keras.applications.densenet import DenseNet121
    from keras.applications.densenet import preprocess_input
    pretrained_model = DenseNet121(weights='imagenet')
    image_producer.startover()
    target_model = keras_model_wrapper(pretrained_model,
                                       preprocess_input,
                                       x=x,
                                       y=y)
    for (indices, label, names, images) in image_producer.batches(sess):
        images = np.array(images)
        label = np_utils.to_categorical(np.array(label), class_num)
    accuracy = model_eval(sess,
                          x,
                          y,
                          target_model.predictions,
                          images,
                          label,
                          args={'batch_size': 32})
    print('Test accuracy of wrapped target model:{:.4f}'.format(accuracy))

    # data information
    x_test, y_test = images, label  # x_test [0, 255]
    print('loading %s images in total' % (images.shape, ))
    print(np.min(x_test), np.max(x_test))

    # local attack specific parameters
    clip_min = args.lower
    clip_max = args.upper
    nb_imgs = args.nb_imgs
    li_eps = args.epsilon
    targeted_true = (args.attack_type == 'targeted')
    k = args.K  # iteration
    a = args.learning_rate  # step size

    # Test the accuracy of targeted attacks, need to redefine the attack graph
    target_ys_one_hot, orig_images, target_ys, orig_labels = generate_attack_inputs(
        target_model, x_test, y_test, class_num, nb_imgs)

    # Set random seed to improve reproducibility
    tf.set_random_seed(args.seed)
    np.random.seed(args.seed)

    # Test whether adversarial examples exist: if not, generate them; otherwise load them.
    prefix = "Results"
    prefix = os.path.join(prefix, str(args.seed))

    if not os.path.exists(prefix):  # no history info
        # load local models or define the architecture
        local_model_types = ['VGG16', 'VGG19', 'resnet50']
        local_model_ls = []
        pred_ls = []
        for model_type in local_model_types:
            pretrained_model, preprocess_input_func = load_model(model_type)
            local_model = keras_model_wrapper(pretrained_model,
                                              preprocess_input_func,
                                              x=x,
                                              y=y)
            accuracy = model_eval(sess,
                                  x,
                                  y,
                                  local_model.predictions,
                                  images,
                                  label,
                                  args={'batch_size': 32})
            print('Test accuracy of model {}: {:.4f}'.format(
                model_type, accuracy))
            local_model_ls.append(local_model)
            pred_ls.append(local_model.predictions)

        # load local model attack graph
        if targeted_true:
            orig_img_loss = compute_cw_loss(target_model,
                                            orig_images,
                                            target_ys_one_hot,
                                            targeted=targeted_true)
        else:
            orig_img_loss = compute_cw_loss(target_model,
                                            orig_images,
                                            orig_labels,
                                            targeted=targeted_true)

        local_attack_graph = LinfPGDAttack(local_model_ls,
                                           epsilon=li_eps,
                                           k=k,
                                           a=a,
                                           random_start=False,
                                           loss_func='xent',
                                           targeted=targeted_true,
                                           x=x,
                                           y=y)
        # pgd attack to local models and generate adversarial example seed
        if targeted_true:
            _, pred_labs, local_aes, pgd_cnt_mat, max_loss, \
            min_loss, ave_loss, max_gap, min_gap, ave_gap = local_attack_in_batches(sess,
                              orig_images,
                              target_ys_one_hot,
                              eval_batch_size = 1,
                              attack_graph=local_attack_graph,
                              model=target_model,
                              clip_min=clip_min,
                              clip_max=clip_max)
        else:
            _, pred_labs, local_aes, pgd_cnt_mat, max_loss, \
            min_loss, ave_loss, max_gap, min_gap, ave_gap = local_attack_in_batches(sess,
                              orig_images,
                              orig_labels,
                              eval_batch_size = 1,
                              attack_graph=local_attack_graph,
                              model=target_model,
                              clip_min=clip_min,
                              clip_max=clip_max)

        # calculate the loss for all adversarial seeds
        if targeted_true:
            adv_img_loss = compute_cw_loss(target_model,
                                           local_aes,
                                           target_ys_one_hot,
                                           targeted=targeted_true)
        else:
            adv_img_loss = compute_cw_loss(target_model,
                                           local_aes,
                                           orig_labels,
                                           targeted=targeted_true)

        success_rate = accuracy_score(target_ys, pred_labs)
        print(
            '** Success rate of targeted adversarial examples generated from local models: **'
            + str(success_rate))
        accuracy = accuracy_score(np.argmax(orig_labels, axis=1), pred_labs)
        print(
            '** Untargeted success rate (misclassification) of the same local adversarial examples: **'
            + str(1 - accuracy))

        # l-inf distance of orig_images and local_aes
        dist = local_aes - orig_images
        l_fin_dist = np.linalg.norm(dist.reshape(nb_imgs, -1), np.inf, axis=1)

        # save the generated local adversarial example ...
        os.makedirs(prefix)
        # save statistics
        for name, arr in [('adv_img_loss', adv_img_loss),
                          ('orig_img_loss', orig_img_loss),
                          ('pgd_cnt_mat', pgd_cnt_mat),
                          ('max_loss', max_loss), ('min_loss', min_loss),
                          ('ave_loss', ave_loss), ('max_gap', max_gap),
                          ('min_gap', min_gap), ('ave_gap', ave_gap)]:
            np.savetxt(os.path.join(prefix, name + '.txt'), arr)

        # save output for local attacks
        fname = os.path.join(prefix, 'local_aes.npy')
        np.save(fname, local_aes)
        fname = os.path.join(prefix, 'orig_images.npy')
        np.save(fname, orig_images)
        fname = os.path.join(prefix, 'target_ys.npy')
        np.save(fname, target_ys)
        fname = os.path.join(prefix, 'target_ys_one_hot.npy')
        np.save(fname, target_ys_one_hot)
    else:
        print('loading data from files')
        local_aes = np.load(os.path.join(prefix, 'local_aes.npy'))
        orig_images = np.load(os.path.join(prefix, 'orig_images.npy'))
        target_ys = np.load(os.path.join(prefix, 'target_ys.npy'))
        target_ys_one_hot = np.load(
            os.path.join(prefix, 'target_ys_one_hot.npy'))

    assert local_aes.shape == (nb_imgs, 224, 224, 3)
    assert orig_images.shape == (nb_imgs, 224, 224, 3)
    assert target_ys.shape == (nb_imgs, )
    assert target_ys_one_hot.shape == (nb_imgs, class_num)

    print('begin NES attack')
    num_queries_list = []
    success_flags = []
    # fetch batch
    orig_images = orig_images[args.bstart:args.bend]
    target_ys = target_ys[args.bstart:args.bend]
    local_aes = local_aes[args.bstart:args.bend]
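    # presumably args.bstart/args.bend shard the image range so the attack can run in parallel jobs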
    # begin loop
    for idx in range(len(orig_images)):
        initial_img = orig_images[idx:idx + 1]
        target_class = target_ys[idx]
        if args.attack_seed_type == 'adv':
            print('attack seed is %s' % args.attack_seed_type)
            attack_seed = local_aes[idx]
        else:
            print('attack seed is %s' % args.attack_seed_type)
            attack_seed = orig_images[idx]
        _, num_queries, adv = nes_attack(sess, args, target_model, attack_seed,
                                         initial_img, target_class, class_num,
                                         IMAGE_SIZE)
        if num_queries == args.max_queries:
            success_flags.append(0)
        else:
            success_flags.append(1)
        num_queries_list.append(num_queries)

    # save query number and success
    fname = os.path.join(prefix,
                         '{}_num_queries.txt'.format(args.attack_seed_type))
    np.savetxt(fname, num_queries_list)
    fname = os.path.join(prefix,
                         '{}_success_flags.txt'.format(args.attack_seed_type))
    np.savetxt(fname, success_flags)

    print('finish NES attack')

def main(config, resume):
    logger = config.get_logger('test')
    arch = config.config['arch']
    autoencoder = arch.get('autoencoder', False)
    # setup data_loader instances
    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=1,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2)

    # build model architecture
    model = config.initialize('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss_fn = getattr(module_loss, config['loss'])
    metric_fns = [getattr(module_metric, met) for met in config['metrics']]

    logger.info('Loading checkpoint: {} ...'.format(resume))
    n_gpu = config['n_gpu']
    model = load_model(model, n_gpu, resume)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    total_loss = 0.0
    total_metrics = torch.zeros(len(metric_fns))

    results = []
    missed = []
    with torch.no_grad():
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data, target = data.to(device), target.to(device)
            if autoencoder:
                target = data
            output = model(data)

            #
            # save sample images, or do something with output here
            #

            # computing loss, metrics on test set
            loss = loss_fn(output, target)
            batch_size = data.shape[0]
            total_loss += loss.item() * batch_size
            # use a different loop variable so the batch index `i` isn't shadowed
            for j, metric in enumerate(metric_fns):
                total_metrics[j] += metric(output, target) * batch_size
            pred = list(output.argmax(dim=1).data.cpu().numpy())
            truth = list(target.data.cpu().numpy())
            if pred != truth:
                samp = data_loader.data.data[i]
                fn, y = samp
                missed.append((pred, truth, fn, y, data.data.cpu().numpy()))
            results.append((pred, truth))
    results = [i for sublist in results for i in sublist]
    results = np.array(results)  #.astype('int32')
    results_fn = os.path.join(str(config.resume.parent), 'results.pkl')
    missed_fn = os.path.join(str(config.resume.parent), 'missed.pkl')
    with open(results_fn, 'wb') as fid:
        pickle.dump(results, fid)
    with open(missed_fn, 'wb') as fid:
        pickle.dump(missed, fid)
    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    log.update({
        met.__name__: total_metrics[i].item() / n_samples
        for i, met in enumerate(metric_fns)
    })
    logger.info(log)
    else:
        options['cv'] = list( splitter.split(X, y, groups=groups) )
    options['n_jobs'] = N_JOBS
    primary_scoring = options['scoring']['primary_scoring']
    options['scoring'] = get_scoring(options['scoring'])
    if options['error_score']:
        options['error_score'] = 'raise'
    else:
        options['error_score'] = np.NaN
    if options['refit'] and isinstance(options['scoring'], dict):
        options['refit'] = 'primary'
    if 'pre_dispatch' in options and options['pre_dispatch'] == '':
        options['pre_dispatch'] = None

    with open(infile_pipeline, 'rb') as pipeline_handler:
        pipeline = load_model(pipeline_handler)

    search_params = get_search_params(params_builder)
    searcher = optimizer(pipeline, search_params, **options)

    if options['error_score'] == 'raise':
        searcher.fit(X, y)
    else:
        warnings.simplefilter('always', FitFailedWarning)
        with warnings.catch_warnings(record=True) as w:
            try:
                searcher.fit(X, y)
            except ValueError:
                pass
            for warning in w:
                print(repr(warning.message))
예제 #48
        '-mx',
        '--seq_max_len',
        default=256,
        help="BEST results: same value as when training the Model")

    args = parser.parse_args()

    EVALUATE_PREDICATES = utils.get_bool_value(args.eval_preds)
    device, USE_CUDA = utils.get_torch_device()
    file_has_gold = utils.get_bool_value(args.gold_labels)
    SEQ_MAX_LEN = int(args.seq_max_len)
    BATCH_SIZE = int(args.batch_size)

    # Load Saved Model
    model, tokenizer = utils.load_model(
        BertForTokenClassification, BertTokenizer,
        f"{args.model_dir}/EPOCH_{args.epoch}")
    label2index = utils.load_label_dict(f"{args.model_dir}/label2index.json")
    # note: str.strip("B-") would also eat any leading/trailing 'B' or '-'
    # characters, so drop the "B-" prefix explicitly instead
    index2label = {v: (k[2:] if k.startswith("B-") else k)
                   for k, v in label2index.items()}

    # Load File for Predictions
    _, prediction_inputs, prediction_masks, gold_labels, seq_lens, gold_predicates = utils.load_srl_dataset(
        args.test_path,
        tokenizer,
        include_labels=True,
        max_len=SEQ_MAX_LEN,
        label2index=label2index)

    # Create the DataLoader.
    prediction_data = TensorDataset(prediction_inputs, prediction_masks,
                                    gold_labels, seq_lens, gold_predicates)
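    # The snippet stops mid-step; a sketch of the canonical continuation,
    # assuming sequential evaluation order:
    from torch.utils.data import DataLoader, SequentialSampler
    prediction_sampler = SequentialSampler(prediction_data)
    prediction_dataloader = DataLoader(prediction_data,
                                       sampler=prediction_sampler,
                                       batch_size=BATCH_SIZE)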
Example #49
def __call__(self, sess, epoch, iteration, model, loss, processed):
    if epoch == self.at_epoch:
        print("Loading model...")
        # note: this rebinds only the local name; the caller's `model`
        # reference is unchanged
        model = load_model(sess, self.path + "latest/")
Example #50
    with open(results_path, 'a') as resfile:
        wr = csv.writer(resfile)
        wr.writerow([
            'Epoch', 'Train Loss', 'Val WER', 'Val CER', 'Val LER',
            'Train Time', 'Val Time'
        ])
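    # Later in the training loop (not shown in this snippet) each epoch would
    # append one row matching the header above -- hypothetical variable names:
    #   with open(results_path, 'a') as resfile:
    #       csv.writer(resfile).writerow([epoch + 1, train_loss, wer, cer,
    #                                     ler, train_time, val_time])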

    batch_time = AverageMeter()
    data_time = AverageMeter()
    train_losses = AverageMeter()

    # Model instantiation.
    train_loss, start_epoch, optim_state = 0, 0, None
    if args.continue_from:  # Starting from previous model
        logger.info(f"Loading checkpoint model {args.continue_from}")
        model, package = load_model(args.continue_from)
        labels = model.labels
        audio_conf = model.audio_conf
        model_conf = model.model_conf
        if not args.finetune:  # Don't want to restart training
            optim_state = package['optim_dict']
            start_epoch = int(package.get('epoch', 0)) + 1  # epochs are 0-indexed during training
            train_loss = int(package.get('avg_loss', 0))
            for i in range(start_epoch):
                train_results[i] = package['train_results'][i]
                val_results[i] = package['val_results'][i]
            best_wer = float(val_results[:start_epoch].min())
    else:
        with open(args.labels_path) as label_file:
            labels = json.load(label_file)
Example #51
        '/media/esepulveda/Elements/4-training/1.33-5',
        '/media/esepulveda/Elements/4-training/1.33-6',
        '/media/esepulveda/Elements/4-training/1.33-7',
        '/media/esepulveda/Elements/4-training/1.35',   
    ]
    images = set_cross_validation(folders)


    image_list = []
    for ims in images:
        image_list += ims

    n = len(image_list)
    # load the binary classifier
    binary_model = utils.load_model(configuration.model.classifier + ".json",
                                    configuration.model.classifier + ".h5")

        
    target_size = (configuration.input.binary_width,
                   configuration.input.binary_height)

    fn = 0
    fp = 0
    tn = 0
    tp = 0

    for i, (image_name, tag) in enumerate(image_list):
        # load and preprocess the image (channels-first, add batch dim)
        image = utils.load_image_opencv(image_name)
        image = cv2.resize(image, target_size)
        image = np.moveaxis(image, -1, 0)
        image = image[np.newaxis, :]
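        # The example is truncated here; a minimal sketch of the missing
        # prediction step, assuming the Keras model emits an "accepted"
        # probability and that `tag` is the binary ground truth
        # (1 = accepted) -- both are assumptions:
        p = float(binary_model.predict(image).ravel()[0])
        predicted = int(p > 0.5)
        if predicted == 1 and tag == 1:
            tp += 1
        elif predicted == 1 and tag == 0:
            fp += 1
        elif predicted == 0 and tag == 1:
            fn += 1
        else:
            tn += 1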
Example #52
import argparse
import collections
import json
import sys

# acc_conv is used below alongside acc_fc; utils provides load_model
import acc_conv
import acc_fc
import rank_selection
import utils

parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model',  help='the model to speed up')
parser.add_argument('-g', '--gpus', default='0', help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--load-epoch',type=int, default=1, help="load the model on an epoch using the model-prefix")
parser.add_argument('--save-model', type=str, default='new-model', help='output model prefix')
parser.add_argument('--config', default=None, help='specify the config file')
parser.add_argument('--ratio', type=float, default=2, help='speed up ratio')
args = parser.parse_args()

model = utils.load_model(args)
if args.config:
  args.config = json.load(open(args.config, 'r'))
else:
  config = {}
  config['conv_params'] = rank_selection.get_ranksel(model, args.ratio)
  config['fc_params'] = {}
  json.dump(config, open('config-rksel-%.1f.json'%(args.ratio), 'w'), indent=2)
  args.config = config

new_model = model
Args = collections.namedtuple('ConvArgs', 'layer K')
for layer, K in args.config['conv_params'].items():
  arg = Args(layer=layer, K=K)
  new_model = acc_conv.conv_vh_decomposition(new_model, arg)
for layer, K in args.config['fc_params'].items():
  # body truncated in the original; by analogy with the conv loop above and
  # the fc_decomposition call in Example #57, presumably:
  arg = Args(layer=layer, K=K)
  new_model = acc_fc.fc_decomposition(new_model, arg)
Example #53
ctx = mx.gpu(4)

utils.log_init()

val_data = ds.load_quickdraw10(batch_size)
data_iter = iter(val_data)
def data_iter_func():
    data, label = next(data_iter)
    return data, label
data, _ = next(data_iter)

root = "/data/wlt/train_7/"
sym_file = root + "quickdraw_wlt_augmentation_epoch-4-0.8164531394275162.json"
prm_file = root + "quickdraw_wlt_augmentation_epoch-4-0.8164531394275162.params"

net1 = utils.load_model(sym_file, prm_file, inputs, ctx=ctx)
acc = mx.metric.Accuracy()
def quickdraw(data, label):
    res = net1.forward(data.as_in_context(ctx))
    acc.update(label, res)
    return "accuracy={:6.2%}".format(acc.get()[1])

if True:
    sym, params = mx.sym.load(sym_file), nd.load(prm_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    mrt = _mrt.MRT(sym, params, inputs_ext)
    mrt.set_data('data', data)
    mrt.calibrate(ctx=ctx)
    mrt.set_output_prec(8)
    qsym, qparams, inputs_ext = mrt.quantize()
Example #54
        '/media/esepulveda/Elements/4-training/1.33-5',
        '/media/esepulveda/Elements/4-training/1.33-6',
        '/media/esepulveda/Elements/4-training/1.33-7',
        '/media/esepulveda/Elements/4-training/1.35',   
    ]
    images = set_cross_validation(folders)


    image_list = []
    for ims in images:
        image_list += ims

    n = len(image_list)
    # load the window classifier
    window_model = utils.load_model(configuration.model.window + ".json",
                                    configuration.model.window + ".h5")
    # load the templates
    templates = classify_template_matching.load_templates(configuration)
    all_windows = range(configuration.window.start, configuration.window.end + 1)
    true_values = {}
    predictions = {}
    for w in all_windows:
        true_values[w] = 0
        predictions[w] = 0

    target_size = (configuration.input.binary_width,
                   configuration.input.binary_height)

    fn = 0
    fp = 0
    tn = 0
    tp = 0
Example #55
def simple_lrp_demo(num_images=1):
    """
    Simple example to demonstrate the LRP methods using the Caffe python interface.
    Calculates the prediction and LRP heatmap for num_images of example imaages from the EXAMPLE_IMAGE_FOLDER
    """


    # load the pre-trained model
    net = load_model(model = MODEL)

    if MODEL == 'googlenet':
        in_hei = 224
        in_wid = 224
    else:
        # default: caffenet
        in_hei = 227
        in_wid = 227

    # load imagenet mean and crop it to fit the networks input dimensions
    cropped_mean = cropped_imagenet_mean(IMAGENET_MEAN_LOCATION, in_hei, in_wid)

    # load example images
    image_paths = [os.path.join(EXAMPLE_IMAGE_FOLDER, p)
                   for p in os.listdir(EXAMPLE_IMAGE_FOLDER)[:num_images]]
    example_images = [Image.open(img_pth) for img_pth in image_paths]

    # preprocess images to fit the caffe input convention (subtract mean, swap
    # input dimensions (blob convention is NxCxHxW), transpose color channels to BGR)
    transformed_input = np.array([transform_input(example_image, True, True,
                                                  in_hei=in_hei, in_wid=in_wid,
                                                  mean=cropped_mean)
                                  for example_image in example_images])

    # adapt caffe batchsize to avoid unnecessary computations
    net.blobs['data'].reshape(*transformed_input.shape)

    # classification (forward pass)
    # the lrp_hm convenience method always performs a forward pass anyway; the
    # output here is only used to report the top predictions later
    net.blobs['data'].data[...] = transformed_input
    out = net.forward()
    top_predictions = np.argmax(out['prob'], axis=1)


    ## ############# ##
    # LRP parameters: #
    ## ############# ##
    lrp_type    = 'epsilon'
    # lrp_type              | meaning of lrp_param  | uses switch_layer | description 
    # ---------------------------------------------------------------------------
    # epsilon               | epsilon               | no                | epsilon lrp
    # alphabeta             | beta                  | no                | alphabeta lrp, alpha = 1-beta
    # eps_n_flat            | epsilon               | yes               | epsilon lrp until switch_layer,   wflat lrp for all layers below
    # eps_n_wsquare         | epsilon               | yes               | epsilon lrp until switch_layer,   wsquare lrp for all layers below
    # ab_n_flat             | beta                  | yes               | alphabeta lrp until switch_layer, wflat lrp for all layers below
    # ab_n_wsquare          | beta                  | yes               | alphabeta lrp until switch_layer, wsquare lrp for all layers below
    # eps_n_ab              | (epsilon, beta)       | yes               | epsilon lrp until switch_layer, alphabeta lrp for all layers below
    # layer_dep             | (epsilon, beta)       | no                | epsilon lrp for all fully-connected layers, alphabeta lrp with alpha=1 for all convolution layers
    # layer_dep_n_flat      | (epsilon, beta)       | yes               | layer_dep (see above) until switch_layer, wflat lrp for all layers below
    # layer_dep_n_wsquare   | (epsilon, beta)       | yes               | layer_dep (see above) until switch-layer, wsquare lrp for all layers below

    # depending on lrp_type, lrp_param needs to be a scalar or a tuple (see table above). If a scalar is given to an lrp_type that expects a tuple, the defaults epsilon=0. and alpha=0. are assumed for the missing component.
    lrp_param   =   1e-10
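    # e.g., for one of the composite methods that expect a tuple (see table
    # above) -- illustrative values only, not from the original:
    # lrp_param = (1e-10, 0.5)   # (epsilon, beta)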

    # the switch_layer param is only needed for the composite methods;
    # it denotes the first layer for which the second formula type is used.
    # interesting values for caffenet are: 0, 4, 8, 10, 12 | 15, 18, 21 (convolution layers | innerproduct layers)
    switch_layer = 13

    classind    =  -1              # (class index  | -1 for top_class)


    ## ################################## ##
    # Heatmap calculation and presentation #
    ## ################################## ##

    # LRP
    backward = lrp_hm(net, transformed_input, lrp_method=lrp_type, lrp_param=lrp_param, target_class_inds=classind, switch_layer=switch_layer)

    if backward is None:
        print('----------ERROR-------------')
        print('LRP result is None, check lrp_type and lrp_param for correctness')
        return

    sum_over_channels  = True
    normalize_heatmap  = False

    if lrp_type == 'deconv':
        sum_over_channels = False
        normalize_heatmap  = True

    # post-process the relevance values
    heatmaps = process_raw_heatmaps(backward, normalize=normalize_heatmap, sum_over_channels=sum_over_channels)

    for im_idx in range(num_images):
        
        if classind == -1:
            print('top class!')
            target_index = top_predictions[im_idx]
        else:
            target_index = classind

        # stretch input to input dimensions (only for visualization)
        stretched_input = transform_input(example_images[im_idx], False, False, in_hei = in_hei, in_wid = in_wid, mean=cropped_mean)
        heatmap = heatmaps[im_idx]

        # presentation
        plt.subplot(1,2,1)
        plt.title('Prediction: {}'.format(top_predictions[im_idx]))
        plt.imshow(stretched_input)
        plt.axis('off')

        # normalize heatmap for visualization
        max_abs = np.max(np.absolute(heatmap))
        norm = mpl.colors.Normalize(vmin = -max_abs, vmax = max_abs)

        plt.subplot(1,2,2)

        if lrp_type in ['epsilon', 'alphabeta', 'eps', 'ab']:
            plt.title('{}-LRP heatmap for class {}'.format(lrp_type, target_index))

        if lrp_type in ['eps_n_flat', 'eps_n_square', 'std_n_ab']:
            if lrp_type == 'eps_n_flat':
                first_method    = 'epsilon'
                second_method   = 'wflat'

            elif lrp_type == 'eps_n_square':
                first_method    = 'epsilon'
                second_method   = 'wsquare'

            elif lrp_type == 'std_n_ab':
                first_method    = 'epsilon'
                second_method   = 'alphabeta'

            plt.title('LRP heatmap for class {}\nstarting with {}\n {} from layer {} on.'.format(target_index, first_method, second_method, switch_layer))

        if sum_over_channels:
            # relevance values are averaged over the pixel channels, use a 1-channel colormap (seismic)
            plt.imshow(heatmap[...,0], cmap='seismic', norm=norm, interpolation='none')
        else:
            # 1 relevance value per color channel
            heatmap = normalize_color_hm(heatmap)
            plt.imshow(heatmap, interpolation = 'none')

        plt.axis('off')
        plt.show()
Example #56
                    help='number of data loading workers')
parser.add_argument('--batch-size',
                    default=64,
                    type=int,
                    help='mini-batch size')
parser.add_argument('--gpu', default=0, type=int, help='GPU id to use.')
parser.add_argument('--print-freq',
                    default=10,
                    type=int,
                    help='print frequency')

if __name__ == "__main__":

    args = parser.parse_args()

    model = load_model(args.model_name)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    distortions = [
        'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
        'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
        'brightness', 'contrast', 'elastic_transform', 'pixelate',
        'jpeg_compression'
    ]

    distortion_errors = []

    for distortion_name in distortions:
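        # The loop body is truncated in the original; a sketch of what each
        # iteration would presumably do, using a hypothetical
        # evaluate_distortion() helper (not part of the original snippet):
        # rate = evaluate_distortion(model, distortion_name, normalize, args)
        # distortion_errors.append(rate)
        # print('{:>20s} | error: {:.2f}%'.format(distortion_name, 100 * rate))
        pass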
Example #57
def main():
  model = utils.load_model(args)
  new_model = fc_decomposition(model, args)
  new_model.save(args.save_model)
Example #58
ts6 = 9
ts7 = 12
ts8 = 14
ts9 = 15
for ts in (ts1, ts2, ts3, ts4, ts5, ts6, ts7, ts8, ts9):
    mask_ts_[mask_tiles == ts] = 1

# Load model
model = load_model(filepath + 'unet_exp_' + str(exp) + '.h5', compile=False)
area = 11
# Prediction
patch_size = 128
ts_tiles = [ts1, ts2, ts3, ts4, ts5, ts6, ts7, ts8, ts9]
patches_test, patches_test_ref = patch_tiles_prediction(ts_tiles, mask_ts_, image_array, image_ref, None, patch_size, stride=patch_size)


result = model.predict(patches_test)
patches_pred = np.argmax(result, axis=-1)

# flatten the patch grids into 1-D label vectors
true_labels = patches_test_ref.reshape(-1)
predicted_labels = patches_pred.reshape(-1)

# Metrics
metrics = compute_metrics(true_labels, predicted_labels)
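# a minimal follow-up sketch; compute_metrics' return format is assumed to be
# printable as-is:
print(metrics)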
Example #59
def binary_classification(image_list, video, video_status, configuration):
    # note: `video_folder` is not defined in this snippet; it is assumed to
    # come from an enclosing scope
    rejected_folder = os.path.join(video_folder, "rejected")
    accepted_folder = os.path.join(video_folder, "accepted")
    allframes_folder = os.path.join(video_folder, "allframes")

    n = len(image_list)

    if not video_status.get("binary_classification",False):
        logging.info("STEP 2: performing binary classification...")
        # load the binary classifier
        binary_model = utils.load_model(configuration.model.classifier + ".json",
                                        configuration.model.classifier + ".h5")

        # process every configuration.frame_step-th frame
        rejected = 0
        accepted = 0
        
        target_size = (configuration.input.binary_width,configuration.input.binary_height)

        # apply frame_step
        images_selected = [image_list[i]
                           for i in range(0, n, configuration.frame_step)]

        n_selected = len(images_selected)
        batch_size = configuration.batch_size
        # ceil division avoids an empty trailing batch when n_selected is a
        # multiple of batch_size
        batches = (n_selected + batch_size - 1) // batch_size
    
        #print (len(images_selected),batch_size,batches)

        for i in range(batches):
            starting = i * batch_size
            ending = min(starting + batch_size, n_selected)
            # print(i, starting, ending)

            images_batch = np.empty((ending - starting, 3,
                                     configuration.input.binary_height,
                                     configuration.input.binary_width))

            for k in range(starting, ending):
                # load and preprocess the image
                image_name = images_selected[k]
                image = utils.load_image_opencv(image_name)
                image = cv2.resize(image, target_size)
                image = np.moveaxis(image, -1, 0)
                image = image[np.newaxis, :]

                images_batch[k - starting, :, :, :] = image
                
            predictions = prediction.predict_accepted_rejected_batch(
                images_batch, binary_model, configuration)

            for k in range(starting, ending):
                image_name = images_selected[k]
                p = predictions[k - starting]

                if p == prediction.REJECTED:
                    logging.debug("image REJECTED: %s",image_name)
                    rejected += 1
                    #copy rejected file to rejected folder
                    shutil.copy(image_name,rejected_folder)
                else:
                    accepted += 1
                    logging.debug("image ACCEPTED: %s",image_name)
                    shutil.copy(image_name,accepted_folder)

        logging.info("STEP 3: report...")
        logging.info("frames in total: {0}".format(len(image_list)))
        logging.info("rejected frames: {0}".format(rejected))
        logging.info("accepted frames: {0}".format(accepted))    

        video_status["binary_classification"] = True
        video_status["window_classification"] = False
        
        config.save_video_status(video_folder,video_status)
    else:
        logging.info("STEP 2: binary classification skipped...")
Example #60
def train(args,
          dataloader_train,
          model,
          feature_map,
          dataloader_validate=None):
    # initialize optimizer
    optimizer = get_optimizer(model, args)

    scheduler = get_scheduler(model, optimizer, args)

    if args.load_model:
        load_model(args.load_model_path, args.device, model, optimizer,
                   scheduler)
        print('Model loaded')

        epoch = get_model_attribute('epoch', args.load_model_path, args.device)
    else:
        epoch = 0

    if args.log_tensorboard:
        writer = SummaryWriter(log_dir=args.tensorboard_path + args.fname +
                               ' ' + args.time,
                               flush_secs=5)
    else:
        writer = None

    while epoch < args.epochs:
        loss, acc = train_epoch(epoch, args, model, dataloader_train,
                                optimizer, scheduler, feature_map, writer)
        epoch += 1
        print('Epoch: {}/{}, train loss: {:.3f}, accuracy: {:.3f}'.format(
            epoch, args.epochs, loss, acc))

        # logging
        if args.log_tensorboard:
            writer.add_scalar(
                '{} {} Loss/train'.format(args.note, args.graph_type), loss,
                epoch)

        # save model checkpoint
        if args.save_model and epoch != 0 and epoch % args.epochs_save == 0:
            save_model(epoch,
                       args,
                       model,
                       optimizer,
                       scheduler,
                       feature_map=feature_map)
            print('Model Saved - Epoch: {}/{}, train loss: {:.6f}'.format(
                epoch, args.epochs, loss))

        if dataloader_validate is not None and epoch % args.epochs_validate == 0:
            loss_validate = test_data(args, model, dataloader_validate,
                                      feature_map)
            if args.log_tensorboard:
                writer.add_scalar(
                    '{} {} Loss/validate'.format(args.note, args.graph_type),
                    loss_validate, epoch)
            else:
                print('Epoch: {}/{}, validation loss: {:.6f}'.format(
                    epoch, args.epochs, loss_validate))

    save_model(epoch,
               args,
               model,
               optimizer,
               scheduler,
               feature_map=feature_map)
    print('Model Saved - Epoch: {}/{}, train loss: {:.6f}'.format(
        epoch, args.epochs, loss))