Example #1
def SVM(para, train_data, train_labels, testing_data, testing_labels):
    """ train a support vector machine models and evaluate on testing data

	Args:
	--------
	train_data: log sequence matrix for training
	train_labels: labels for training
	testing_data: log sequence matrix for testing/evaluation
	testing_labels: labels for testing/evaluation

	Returns:
	--------
	precision: The evaluation metric Precision
	recall: The evaluation metric Recall
	f1_score: The evaluation metric F1_score
	"""

    print("Train a SVM Model")
    clf = svm.LinearSVC(penalty='l1',
                        tol=0.0001,
                        C=1,
                        dual=False,
                        fit_intercept=True,
                        intercept_scaling=1,
                        class_weight='balanced',
                        max_iter=1000)
    clf = clf.fit(train_data, train_labels.ravel())
    prediction = list(clf.predict(testing_data))
    assert len(prediction) == len(testing_labels)

    if para['cross_validate']:
        ev.cv_evaluate(clf, train_data, train_labels)
    else:
        ev.evaluate(testing_labels, prediction)
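
A minimal, self-contained sketch of the same training-and-scoring flow, assuming binary labels and using scikit-learn's precision_recall_fscore_support in place of the repository's ev.evaluate helper (the synthetic data below is hypothetical):

import numpy as np
from sklearn import svm
from sklearn.metrics import precision_recall_fscore_support

rng = np.random.RandomState(0)
train_data = rng.poisson(2.0, size=(200, 30))       # log event count matrix
train_labels = rng.randint(0, 2, size=(200, 1))     # 0 = normal, 1 = anomaly
testing_data = rng.poisson(2.0, size=(50, 30))
testing_labels = rng.randint(0, 2, size=50)

clf = svm.LinearSVC(penalty='l1', tol=0.0001, C=1, dual=False,
                    class_weight='balanced', max_iter=1000)
clf.fit(train_data, train_labels.ravel())
prediction = clf.predict(testing_data)

precision, recall, f1_score, _ = precision_recall_fscore_support(
    testing_labels, prediction, average='binary')
print(precision, recall, f1_score)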
Example #2
def anomaly_detection(weigh_data, label_data, C, threshold):
	""" detect anomalies by projecting into a subspace with C

	Args:
	--------
	weigh_data: the weighted raw data matrix
	label_data: the ground truth labels
	C: the projection matrix
	threshold: the SPE threshold above which an instance is flagged as an anomaly

	Returns:
	--------

	"""
	print ('there are %d anomalies' % (sum(label_data)))
	event_num, inst_size  = weigh_data.shape
	predict_results = np.zeros((inst_size),int)
	print('the threshold is %f' % (threshold))
	for i in range(inst_size):
		ya = np.dot(C,weigh_data[:,i])
		SPE = np.dot(ya,ya)
		if SPE > threshold:
			predict_results[i] = 1	#1 represent failure
	assert len(label_data) == len(predict_results)
	ev.evaluate(label_data, predict_results)
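
The projection matrix C is taken as an input above; a common construction in PCA-based anomaly detection is C = I - P P^T, where P holds the top-k principal directions, so that SPE = ||C x||^2 measures the residual energy of an instance. A minimal sketch of that construction (the choice of n_components and the thresholding rule are assumptions, not shown in this repository):

import numpy as np

def build_projection_matrix(weigh_data, n_components=3):
    """Return C = I - P P^T, which projects onto the residual (anomaly) subspace.

    weigh_data: (event_num, inst_size) weighted event count matrix,
    in the same orientation as used by anomaly_detection above.
    """
    X = weigh_data.T                          # instances as rows: (inst_size, event_num)
    X = X - X.mean(axis=0, keepdims=True)
    _, _, Vt = np.linalg.svd(X, full_matrices=False)
    P = Vt[:n_components].T                   # top-k principal directions as columns
    return np.identity(weigh_data.shape[0]) - P @ P.T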
Example #3
def evaluate(event_count_matrix, invar_dict, groundtruth_labels):
	""" evaluate the results with mined invariants

	Args:
	--------
	event_count_matrix: the input event count matrix
	invar_dict: the dictionary of invariants
	groundtruth_labels: the groundtruth labels for evaluation

	Returns:
	--------
	"""
	print("the mined {} invariants are: {}".format(len(invar_dict), invar_dict))
	valid_col_list = []
	valid_invar_list = []
	for key in invar_dict:
		valid_col_list.append(list(key))
		valid_invar_list.append(list(invar_dict[key]))

	prediction = []
	for row in event_count_matrix:
		label = 0
		for i, cols in enumerate(valid_col_list):
			sum_of_invar = 0
			for j, c in enumerate(cols):
				sum_of_invar += valid_invar_list[i][j] * row[c]
			if sum_of_invar != 0:
				label = 1
				break
		prediction.append(label)

	assert len(groundtruth_labels) == len(prediction)
	ev.evaluate(groundtruth_labels, prediction)
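
For reference, invar_dict is expected to map a tuple of event columns to the coefficients of a linear invariant over the event counts; a row is predicted anomalous as soon as one invariant evaluates to a nonzero sum. A tiny, self-contained illustration with hypothetical values:

event_count_matrix = [
    [2, 2, 1],   # satisfies the invariant: 1*2 - 1*2 == 0
    [3, 1, 0],   # violates it:             1*3 - 1*1 != 0  -> anomaly
]
invar_dict = {(0, 1): [1, -1]}   # columns (0, 1) with coefficients 1 and -1, i.e. x0 - x1 == 0
groundtruth_labels = [0, 1]

# The same per-row check performed by evaluate() above:
for row in event_count_matrix:
    violated = any(
        sum(coef * row[col] for col, coef in zip(cols, coefs)) != 0
        for cols, coefs in invar_dict.items()
    )
    print(int(violated))   # prints 0, then 1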
Example #4
def anomalyDetect(para, succ_index_list, fail_index_list, train_base_data, train_online_data, testing_data, train_online_label, testing_label, total_inst_num ):
	fail_data = train_base_data[fail_index_list,:]
	succ_dta = train_base_data[succ_index_list,:]
	print(fail_data.shape, succ_dta.shape)
	assert fail_data.shape[0] + succ_dta.shape[0] == train_base_data.shape[0]

	# clustering
	fail_cluster_results, fail_index_per_clu = clustering(para, fail_index_list, fail_data)
	print('failure data clustering finished...')
	succ_cluster_results, succ_index_per_clu = clustering(para, succ_index_list, succ_dta)
	print('success data clustering finished...')

	# extract representatives for each cluster of data
	dis_sum_list = np.zeros(total_inst_num)  # A one dimension list of all zero with size of totalLineNum
	fail_repre = extract_repre(train_base_data, fail_index_per_clu, dis_sum_list)
	succ_repre = extract_repre(train_base_data, succ_index_per_clu, dis_sum_list)

	# online learning
	train_base_size = train_base_data.shape[0]
	online_learn(para, train_online_data, train_online_label, dis_sum_list, fail_repre, succ_repre, fail_index_per_clu, succ_index_per_clu, train_base_data, train_base_size)

	# detect anomalies
	predict_label = detect(para, fail_repre, succ_repre, testing_data)
	assert len(testing_label) == len(predict_label)
	ev.evaluate(testing_label, predict_label)
Example #5
def anomaly_detection(weigh_data, label_data, C, threshold):
    """ detect anomalies by projecting into a subspace with C

	Args:
	--------
	weigh_data: the weighted raw data matrix
	label_data: the ground truth labels
	C: the projection matrix
	threshold: the SPE threshold above which an instance is flagged as an anomaly

	Returns:
	--------

	"""
    print('there are %d anomalies' % (sum(label_data)))
    event_num, inst_size = weigh_data.shape
    predict_results = np.zeros((inst_size), int)
    print('the threshold is %f' % (threshold))
    for i in range(inst_size):
        ya = np.dot(C, weigh_data[:, i])
        SPE = np.dot(ya, ya)
        if SPE > threshold:
            predict_results[i] = 1  #1 represent failure
    assert len(label_data) == len(predict_results)
    ev.evaluate(label_data, predict_results)
Example #6
def main(_run):
    args = Namespace(**_run.config)
    logger.info(args)
    training_data, validation_data, test_data = load_input(args)
    model = choose_model(args, training_data, validation_data)
    # TEST
    evaluate(args, model, training_data, validation_data, test_data)
Example #7
def evaluate(event_count_matrix, invar_dict, groundtruth_labels):
    """ evaluate the results with mined invariants

	Args:
	--------
	event_count_matrix: the input event count matrix
	invar_dict: the dictionary of invariants
	groundtruth_labels: the groundtruth labels for evaluation

	Returns:
	--------
	"""
    print("the mined {} invariants are: {}".format(len(invar_dict),
                                                   invar_dict))
    valid_col_list = []
    valid_invar_list = []
    for key in invar_dict:
        valid_col_list.append(list(key))
        valid_invar_list.append(list(invar_dict[key]))

    prediction = []
    for row in event_count_matrix:
        label = 0
        for i, cols in enumerate(valid_col_list):
            sum_of_invar = 0
            for j, c in enumerate(cols):
                sum_of_invar += valid_invar_list[i][j] * row[c]
            if sum_of_invar != 0:
                label = 1
                break
        prediction.append(label)

    assert len(groundtruth_labels) == len(prediction)
    ev.evaluate(groundtruth_labels, prediction)
    return prediction
Example #8
def train_model(model, optimizer, criterion, train_loader, val_loader,
                scheduler, grad_acc_steps, metric, exp_path):

    n_cycles = len(scheduler.cycle_lens)
    best_auc, best_dice, best_cycle = 0, 0, 0
    is_better, best_monitoring_metric = compare_op(metric)

    for cycle in range(n_cycles):
        print('Cycle {:d}/{:d}'.format(cycle + 1, n_cycles))
        # prepare next cycle:
        # reset iteration counter
        scheduler.last_epoch = -1
        # update number of iterations
        scheduler.T_max = scheduler.cycle_lens[cycle] * len(train_loader)

        # train one cycle, retrieve segmentation data and compute metrics at the end of cycle
        tr_logits, tr_labels, tr_loss = train_one_cycle(
            train_loader, model, criterion, optimizer, scheduler,
            grad_acc_steps, cycle)
        # classification metrics at the end of cycle
        tr_auc, tr_dice = evaluate(
            tr_logits, tr_labels,
            model.n_classes)  # for n_classes>1, will need to redo evaluate
        del tr_logits, tr_labels
        with torch.no_grad():
            assess = True
            vl_logits, vl_labels, vl_loss = run_one_epoch(val_loader,
                                                          model,
                                                          criterion,
                                                          assess=assess)
            vl_auc, vl_dice = evaluate(
                vl_logits, vl_labels,
                model.n_classes)  # for n_classes>1, will need to redo evaluate
            del vl_logits, vl_labels
        print(
            'Train/Val Loss: {:.4f}/{:.4f}  -- Train/Val AUC: {:.4f}/{:.4f}  -- Train/Val DICE: {:.4f}/{:.4f} -- LR={:.6f}'
            .format(tr_loss, vl_loss, tr_auc, vl_auc, tr_dice, vl_dice,
                    get_lr(optimizer)).rstrip('0'))

        # check if performance was better than any before and checkpoint if so
        if metric == 'auc':
            monitoring_metric = vl_auc
        elif metric == 'tr_auc':
            monitoring_metric = tr_auc
        elif metric == 'loss':
            monitoring_metric = vl_loss
        elif metric == 'dice':
            monitoring_metric = vl_dice
        if is_better(monitoring_metric, best_monitoring_metric):
            print('Best {} attained. {:.2f} --> {:.2f}'.format(
                metric, 100 * best_monitoring_metric, 100 * monitoring_metric))
            best_auc, best_dice, best_cycle = vl_auc, vl_dice, cycle + 1
            best_monitoring_metric = monitoring_metric
            if exp_path is not None:
                print(15 * '-', ' Checkpointing ', 15 * '-')
                save_model(exp_path, model, optimizer)

    del model
    torch.cuda.empty_cache()
    return best_auc, best_dice, best_cycle
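
The cycle loop above relies on a compare_op(metric) helper that returns a comparison function plus an initial best value; its implementation is not shown here, but a plausible sketch (hypothetical, for illustration only) is:

import operator

def compare_op(metric):
    """Return (is_better, initial_best) for the monitored metric."""
    if metric in ('auc', 'tr_auc', 'dice'):
        return operator.gt, 0.0            # higher is better
    if metric == 'loss':
        return operator.lt, float('inf')   # lower is better
    raise NotImplementedError('unknown metric: {}'.format(metric))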
Example #9
def logistic_regression(para, train_data, train_labels, testing_data,
                        testing_labels):
    """ train a logistic regression models and evaluate on testing data

	Args:
	--------
	train_data: log sequence matrix for training
	train_labels: labels for training
	testing_data: log sequence matrix for testing/evaluation
	testing_labels: labels for testing/evaluation

	Returns:
	--------
	precision: The evaluation metric Precision
	recall: The evaluation metric Recall
	f1_score: The evaluation metric F1_score
	"""

    print("Train a Logistic Regression Model")
    clf = LogisticRegression(C=100,
                             penalty='l1',
                             tol=0.01,
                             class_weight='balanced',
                             solver='liblinear',  # liblinear supports the L1 penalty
                             multi_class='ovr')
    clf = clf.fit(train_data, train_labels.ravel())
    prediction = list(clf.predict(testing_data))
    assert len(prediction) == len(testing_labels)

    if para['cross_validate']:
        ev.cv_evaluate(clf, train_data, train_labels)
    else:
        ev.evaluate(testing_labels, prediction)
Example #10
def decision_tree(para, train_data, train_labels, testing_data,
                  testing_labels):
    """ train a decision tree models and evaluate on testing data

	Args:
	--------
	train_data: log sequence matrix for training
	train_labels: labels for training
	testing_data: log sequence matrix for testing/evaluation
	testing_labels: labels for testing/evaluation

	Returns:
	--------
	precision: The evaluation metric Precision
	recall: The evaluation metric Recall
	f1_score: The evaluation metric F1_score
	"""

    print("Train a Decision Tree Model")
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(train_data, train_labels)
    prediction = list(clf.predict(testing_data))
    assert len(prediction) == len(testing_labels)

    if para['cross_validate']:
        ev.cv_evaluate(clf, train_data, train_labels)
    else:
        ev.evaluate(testing_labels, prediction)
Example #11
def train(model, optimizer, criterion, train_loader, val_loader, n_epochs, patience, decay_f, data_out, n_epochs_gen):
    counter_since_checkpoint = 0
    tr_losses, tr_aucs, tr_dices, vl_losses, vl_aucs, vl_dices = [], [], [], [], [], []
    is_better, best_monitoring_metric = compare_op('auc')

    for epoch in range(n_epochs):
        print('\n EPOCH: {:d}/{:d}'.format(epoch+1, n_epochs))
        # train one epoch
        train_logits, train_labels, train_loss = run_one_epoch(train_loader, model, criterion, optimizer)
        train_auc, train_dice = evaluate(train_logits, train_labels, model.n_classes)

        # validate one epoch, note no optimizer is passed
        with torch.no_grad():
            val_logits, val_labels, val_loss = run_one_epoch(val_loader, model, criterion)
            val_auc, val_dice = evaluate(val_logits, val_labels, model.n_classes)
        print('Train/Val Loss: {:.4f}/{:.4f}  -- Train/Val AUC: {:.4f}/{:.4f}  -- Train/Val DICE: {:.4f}/{:.4f} -- LR={:.6f}'.format(
                train_loss, val_loss, train_auc, val_auc, train_dice, val_dice, get_lr(optimizer)).rstrip('0'))

        # store performance for this epoch
        tr_aucs.append(train_auc)
        vl_aucs.append(val_auc)
        #  smooth val values with a moving average before comparing
        val_auc = ewma(vl_aucs)[-1]

        # check if performance was better than any before and checkpoint if so
        monitoring_metric = val_auc

        if is_better(monitoring_metric, best_monitoring_metric):
            print('Best (smoothed) val {} attained. {:.4f} --> {:.4f}'.format(
                'auc', best_monitoring_metric, monitoring_metric))
            best_monitoring_metric = monitoring_metric
            counter_since_checkpoint = 0  # reset patience
        else:
            counter_since_checkpoint += 1

        if decay_f != 0 and counter_since_checkpoint == 3*patience//4:
            reduce_lr(optimizer, epoch, factor=decay_f, verbose=False)
            print(8 * '-', ' Reducing LR now ', 8 * '-')

        # early stopping if no improvement happened for `patience` epochs
        if counter_since_checkpoint == patience:
            print('\n Early stopping the training, trained for {:d} epochs'.format(epoch))
            del model
            torch.cuda.empty_cache()
            return
        # create examples at this point
        if epoch != 0 and (epoch+1) % n_epochs_gen == 0:
            print('\n Generating examples for model trained for {:d} epochs'.format(epoch+1))
            build_examples(train_loader, model, epoch+1, data_out)
            build_examples(val_loader, model, epoch+1, data_out)
    del model
    torch.cuda.empty_cache()
    return
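
The early-stopping logic above compares a smoothed validation AUC obtained from ewma(vl_aucs)[-1]; the repository's own ewma is not shown, but a simple exponentially weighted moving average along these lines would fit the call site (hypothetical sketch):

import numpy as np

def ewma(values, alpha=0.5):
    """Exponentially weighted moving average of a list of metric values."""
    smoothed = []
    for v in values:
        if not smoothed:
            smoothed.append(v)
        else:
            smoothed.append(alpha * v + (1 - alpha) * smoothed[-1])
    return np.asarray(smoothed)

# ewma([0.70, 0.80, 0.75])[-1] -> 0.75 with alpha=0.5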
Example #12
def test_trained_model():
    print("Loading the best trained model")
    saved_obj = torch.load(cfg.checkpoint_name,
                           map_location=lambda storage, loc: storage)
    model = saved_obj['model'].to(device)
    model.beam_search_decoding = True
    model.beam_size = int(cfg.beam_size)
    model.beam_search_length_norm_factor = float(
        cfg.beam_search_length_norm_factor)
    model.beam_search_coverage_penalty_factor = float(
        cfg.beam_search_coverage_penalty_factor)
    # SRC = saved_obj['field_src']
    TGT = saved_obj['field_tgt']
    evaluate(val_iter, TGT, model, src_val_file_address, tgt_val_file_address,
             "VALIDATE")
    evaluate(test_iter, TGT, model, src_test_file_address,
             tgt_test_file_address, "TEST")
Example #13
def test(model,params):
    model.eval()
    test_output, test_target = get_predictions(model, params, split='test')
    
    if params.label == 'emotion': 
        test_output = test_output.reshape_as(test_target)      
         
    performances = evaluate(params,test_output,test_target)
    return performances
Example #14
def test_trained_model():
    print("Loading the best trained model")
    saved_obj = torch.load("../.checkpoints/"+cfg.checkpoint_name, map_location=lambda storage, loc: storage)
    model = saved_obj['model'].to(device)
    if 'training_evaluation_results' in saved_obj:
        print("Comma separated greedy decoding validation set BlueP1 scores collected during training:\n\t ===> {}".format(
            ",".join(["{:.2f}".format(x) for x in saved_obj['training_evaluation_results']])))
    model.beam_search_decoding = True
    model.beam_size = int(cfg.beam_size)
    model.beam_search_length_norm_factor = float(cfg.beam_search_length_norm_factor)
    model.beam_search_coverage_penalty_factor = float(cfg.beam_search_coverage_penalty_factor)
    SRC = saved_obj['field_src']
    TGT = saved_obj['field_tgt']
    print("Model loaded, total number of parameters: {}".format(sum([p.numel() for p in model.parameters()])))
    dp = DataProvider(SRC, TGT, load_train_data=False)
    nuance = str(int(time.time()))
    evaluate(dp.val_iter, dp, model, dp.processed_data.addresses.val.src, dp.processed_data.addresses.val.tgt,
             "VALID.{}".format(dp.val_iter.dataset.name), save_decoded_sentences=True, nuance=nuance)
    for test_iter, s, t in zip(dp.test_iters, dp.processed_data.addresses.tests.src, dp.processed_data.addresses.tests.tgt):
        evaluate(test_iter, dp, model, s, t, "TEST.{}".format(test_iter.dataset.name), save_decoded_sentences=True, nuance=nuance)
Example #15
def train_cross_val(KFolds, config, data, metrics, metrics_CV2, results, results_CV2):
    # Set random seed
    if (config.seed != None) and isinstance(config.seed, int): 
        tf.set_random_seed(config.seed)
        np.random.seed(config.seed)

    # Ensure that the model will fit in GPU memory
    good_batch_size = False 

    # Check whether weight files exist before starting, if evaluate flag is active
    if (config.evaluate): check_weights_exist(config, KFolds)

    for fold in range(len(KFolds)):
        # Keep track of training time
        t_start = time.time()
        (train_keys, test_keys)  = KFolds[fold]
        # (train_keys, valid_keys) = shuffle_split_array(train_keys, config.val_split)

        # Retrieve fold information
        # execinfo = FoldKeys(train_keys=train_keys, valid_keys=valid_keys, test_keys=test_keys)
        execinfo = ExecutionInformation(config, fold, train_keys, test_keys, config.evaluate)

        # Train a single fold
        good_batch_size = train_fold(config, data, execinfo, metrics, metrics_CV2, results, results_CV2, good_batch_size, True)

        # Check total training time
        time_elapsed = time.time() - t_start
        print('\n * Training complete in {:.0f}m {:.0f}s\n'.format(time_elapsed // 60, time_elapsed % 60))

    # Retrieve patient ID from lead data
    IDs = np.asarray(list(set(train_keys.tolist() + test_keys.tolist()))) # Avoid duplicates

    # Sanity check
    if len(IDs) != len(train_keys) + len(test_keys):
        raise ValueError("Some record shared between train and test!")

    # Evaluation of whole dataset
    execinfo = ExecutionInformation(config, None, None, IDs, True)
    evaluate(None, config, data, execinfo, metrics, metrics_CV2, results, results_CV2, False)
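
KFolds above is consumed as a list of (train_keys, test_keys) pairs indexed by fold; a minimal sketch of building such a structure with scikit-learn (the record keys here are hypothetical):

import numpy as np
from sklearn.model_selection import KFold

patient_ids = np.array(['p%03d' % i for i in range(20)])   # hypothetical record keys
KFolds = [
    (patient_ids[train_idx], patient_ids[test_idx])
    for train_idx, test_idx in KFold(n_splits=5, shuffle=True,
                                     random_state=0).split(patient_ids)
]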
Example #16
def train_fold(config, data, execinfo, metrics, metrics_CV2, results, results_CV2, good_batch_size, recompute):
    ############################# MODEL CREATION ############################
    # Clear cache
    keras.backend.clear_session()

    # Define the model according to the configuration
    # if config.strategy == 'single': model = arch.FlatNet(config).create_model()
    # else:                           model = arch2D.FlatNet(config).create_model()
    model = arch.FlatNet(config).create_model()

    # Select optimizer and loss + compile
    optim   = select_optimizer(config.optimizer.lower())(lr=config.learning_rate)
    loss    = select_loss(config.loss.lower())
    model.compile(optimizer=optim, loss=loss)

    write_summary(execinfo.summary, model)

    # If chosen and exists, load weights (fine-tuning, etc.)
    if (config.load_weights or config.evaluate) and os.path.exists(execinfo.state):
        model.load_weights(execinfo.state)

    # If the flag to evaluate has not been set, train the model
    if not config.evaluate:
        # Data generators
        GeneratorTrain = DataGenerator(execinfo.train, config, data)
        GeneratorValid = DataGenerator(execinfo.valid, config, data)

        # keras-specific train
        good_batch_size = train_epochs(config, model, data, execinfo, results, results_CV2, good_batch_size, GeneratorTrain, GeneratorValid)

        # Evaluate model
        if config.splitting.lower() == "cross_validation":
            evaluate(model, config, data, execinfo, metrics, metrics_CV2, results, results_CV2, recompute)
    else:
        if config.splitting.lower() == "cross_validation":
            evaluate(model, config, data, execinfo, metrics, metrics_CV2, results, results_CV2, recompute)

    return good_batch_size
Example #17
def train_all(IDs, config, data, metrics, metrics_CV2, results, results_CV2):
    # Set random seed
    if (config.seed != None) and isinstance(config.seed, int): 
        tf.set_random_seed(config.seed)
        np.random.seed(config.seed)

    # Keep track of training time
    t_start = time.time()

    # "iif" evaluate, test on whole database: useful for testing on other DB's
    if config.evaluate:
        # Define the model according to the configuration
        # if config.strategy == 'single': model = arch.FlatNet(config).create_model()
        # else:                           model = arch2D.FlatNet(config).create_model()
        model = arch.FlatNet(config).create_model()

        # Select optimizer and loss + compile
        optim   = select_optimizer(config.optimizer.lower())(lr=config.learning_rate)
        loss    = select_loss(config.loss.lower())
        model.compile(optimizer=optim, loss=loss)

        execinfo = ExecutionInformation(config, None, None, IDs, True) # Define execution
        evaluate(model, config, data, execinfo, metrics, metrics_CV2, results, results_CV2, True) # Evaluate
    else:
        # Retrieve execution information
        execinfo = ExecutionInformation(config, None, IDs, None, False)

        # Check whether weight files exist before starting, if evaluate flag is active
        if (config.evaluate) and (not os.path.exists(execinfo.state)):
            raise FileNotFoundError("Weights file not found")

        # Train a single ""fold""
        _ = train_fold(config, data, execinfo, metrics, metrics_CV2, results, results_CV2, False, True)

    # Check total training time
    time_elapsed = time.time() - t_start
    print('\n * Training complete in {:.0f}m {:.0f}s\n'.format(time_elapsed // 60, time_elapsed % 60))
Example #18
def test(model, params):
    model.eval()
    test_output, test_target, test_mask = get_predictions(model,
                                                          params,
                                                          split='test')

    test_target = torch.argmax(test_target.reshape(-1, params.output_dim), -1)
    test_output = test_output.reshape(-1, params.output_dim)
    if not params.dialogue_context:
        nonzero_idx = test_mask.view(-1).nonzero()[:, 0]
        test_output = test_output[nonzero_idx]
        test_target = test_target[nonzero_idx]

    performances = evaluate(params, test_output, test_target)

    return performances
Example #19
 def on_epoch_end(self, epoch, logs=None):
     self.keras_ema.assign_shadow_weights()
     logits1, logits2, _, _ = self.model.predict(
         x=[
             dev_data['context_id'], dev_data['question_id'],
             dev_data['context_char_id'], dev_data['question_char_id']
         ],
         batch_size=config.batch_size,
         verbose=1)
     all_results = []
     for i, qid in enumerate(dev_data['qid']):
         start_logits = logits1[i, :]
         end_logits = logits2[i, :]
         all_results.append(
             RawResult(qid=qid,
                       start_logits=start_logits,
                       end_logits=end_logits))
     output_prediction_file = os.path.join(config.path,
                                           'output_prediction.json')
     output_nbest_file = os.path.join(config.path, 'output_nbest.json')
     write_predictions(eval_examples,
                       eval_features,
                       all_results,
                       n_best_size=20,
                       max_answer_length=config.ans_limit,
                       do_lower_case=False,
                       output_prediction_file=output_prediction_file,
                       output_nbest_file=output_nbest_file)
     metrics = evaluate('original_data/dev-v1.1.json',
                        output_prediction_file, None)
     ems.append(metrics['exact'])
     f1s.append(metrics['f1'])
     result = pd.DataFrame([ems, f1s], index=['em', 'f1']).transpose()
     result.to_csv('logs/result_' + config.name + '.csv', index=None)
     if f1s[-1] > self.max_f1:
         self.max_f1 = f1s[-1]
         model.save_weights(
             os.path.join(config.path,
                          'QANet_model_' + config.name + '.h5'))
     model.load_weights(self.keras_ema.temp_model)
Example #20
def run(qf, gf, q_pids, g_pids, q_camids, g_camids, t=5, device=None):
    """
    :param qf: torch.Tensor(q, m)
    :param gf: torch.Tensor(g, m)
    :param q_pids: torch.Tensor(q)
    :param g_pids: torch.Tensor(g)
    :param q_camids: torch.Tensor(q)
    :param g_camids: torch.Tensor(g)
    :return:
    """
    # These will be initialized automatically
    positive_indices = None
    negative_indices = None
    distmat = None
    for _ in tqdm(range(t)):
        res = rocchio_round(qf, gf, q_pids, g_pids, positive_indices, negative_indices,
                            previous_distmat=distmat, device=device)
        distmat, positive_indices, negative_indices, matches = res
        del matches
    result = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, device=device)
    print("Results after {} rounds of Rocchio:".format(t), "mAP", result[1], "mINP", result[2])
    return distmat
Example #21
def run(qf,
        gf,
        q_pids,
        g_pids,
        q_camids,
        g_camids,
        t=5,
        method="min",
        device=None):
    positive_indices = None
    negative_indices = None
    distmat = None
    distmat_qg = None
    for _ in tqdm(range(t)):
        res = ne_round(qf,
                       gf,
                       q_pids,
                       g_pids,
                       positive_indices,
                       negative_indices,
                       distmat,
                       distmat_qg,
                       method=method,
                       verbose=0,
                       device=device)
        distmat, positive_indices, negative_indices, distmat_qg, matches = res
        del matches
    del res, distmat_qg, positive_indices, negative_indices
    result = evaluate(distmat,
                      q_pids,
                      g_pids,
                      q_camids,
                      g_camids,
                      device=device)
    print(
        "Results after {} rounds of neighborhood expansion ({}):".format(
            t, method), "mAP", result[1], "mINP", result[2])
    return distmat
Example #22
# Select a subset of 100 samples (10 per label)
train_subset = split(train_set, 100, 'Balanced')

##########

checkpoint_count = 1

for epoch in tqdm(range(epochs)):
    for data in tqdm(train_loader):
        trainer.train_step(data)

    if epoch % evaluate_every == 0:
        # Compute train and test_accuracy of a logistic regression
        train_accuracy, test_accuracy = evaluate(encoder=trainer.encoder,
                                                 train_on=train_subset,
                                                 test_on=test_set,
                                                 device=device)
        if not (writer is None):
            writer.add_scalar(tag='evaluation/train_accuracy',
                              scalar_value=train_accuracy,
                              global_step=trainer.iterations)
            writer.add_scalar(tag='evaluation/test_accuracy',
                              scalar_value=test_accuracy,
                              global_step=trainer.iterations)

        tqdm.write('Train Accuracy: %f' % train_accuracy)
        tqdm.write('Test Accuracy: %f' % test_accuracy)

    if epoch % checkpoint_every == 0:
        tqdm.write('Storing model checkpoint')
        while os.path.isfile(
Example #23
def train(conf, _model):

    if conf['rand_seed'] is not None:
        np.random.seed(conf['rand_seed'])

    if not os.path.exists(conf['save_path']):
        os.makedirs(conf['save_path'])

    # load data
    print('starting loading data')
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    train_data, val_data, test_data = pickle.load(open(conf["data_path"],
                                                       'rb'))
    print('finish loading data')
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    val_batches = reader.build_batches(val_data, conf)

    print("finish building test batches")
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    # refine conf
    batch_num = len(train_data['y']) / conf["batch_size"]
    val_batch_num = len(val_batches["response"])

    conf["train_steps"] = conf["num_scan_data"] * batch_num
    conf["save_step"] = int(max(1, batch_num / 10))
    conf["print_step"] = int(max(1, batch_num / 100))

    print('configurations: %s' % conf)

    print('model sucess')
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    _graph = _model.build_graph()
    print('build graph sucess')
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    with tf.Session(graph=_graph) as sess:
        _model.init.run()
        if conf["init_model"]:
            _model.saver.restore(sess, conf["init_model"])
            print("sucess init %s" % conf["init_model"])

        average_loss = 0.0
        batch_index = 0
        step = 0
        best_result = [0, 0, 0, 0]

        for step_i in xrange(conf["num_scan_data"]):
            #for batch_index in rng.permutation(range(batch_num)):
            print('starting shuffle train data')
            shuffle_train = reader.unison_shuffle(train_data)
            train_batches = reader.build_batches(shuffle_train, conf)
            print('finish building train data')
            for batch_index in range(batch_num):

                feed = {
                    _model.turns:
                    train_batches["turns"][batch_index],
                    _model.tt_turns_len:
                    train_batches["tt_turns_len"][batch_index],
                    _model.every_turn_len:
                    train_batches["every_turn_len"][batch_index],
                    _model.response:
                    train_batches["response"][batch_index],
                    _model.response_len:
                    train_batches["response_len"][batch_index],
                    _model.label:
                    train_batches["label"][batch_index]
                }

                batch_index = (batch_index + 1) % batch_num

                _, curr_loss = sess.run([_model.g_updates, _model.loss],
                                        feed_dict=feed)

                average_loss += curr_loss

                step += 1

                if step % conf["print_step"] == 0 and step > 0:
                    g_step, lr = sess.run(
                        [_model.global_step, _model.learning_rate])
                    print('step: %s, lr: %s' % (g_step, lr))
                    print("processed: [" + str(step * 1.0 / batch_num) +
                          "] loss: [" +
                          str(average_loss / conf["print_step"]) + "]")
                    average_loss = 0

                if step % conf["save_step"] == 0 and step > 0:
                    index = step / conf['save_step']
                    score_file_path = conf['save_path'] + 'score.' + str(index)
                    score_file = open(score_file_path, 'w')
                    print('save step: %s' % index)
                    print(
                        time.strftime('%Y-%m-%d %H:%M:%S',
                                      time.localtime(time.time())))

                    for batch_index in xrange(val_batch_num):

                        feed = {
                            _model.turns:
                            val_batches["turns"][batch_index],
                            _model.tt_turns_len:
                            val_batches["tt_turns_len"][batch_index],
                            _model.every_turn_len:
                            val_batches["every_turn_len"][batch_index],
                            _model.response:
                            val_batches["response"][batch_index],
                            _model.response_len:
                            val_batches["response_len"][batch_index],
                            _model.label:
                            val_batches["label"][batch_index]
                        }

                        scores = sess.run(_model.logits, feed_dict=feed)

                        for i in xrange(conf["batch_size"]):
                            score_file.write(
                                str(scores[i]) + '\t' +
                                str(val_batches["label"][batch_index][i]) +
                                '\n')
                    score_file.close()

                    #write evaluation result
                    result = eva.evaluate(score_file_path)
                    result_file_path = conf["save_path"] + "result." + str(
                        index)
                    with open(result_file_path, 'w') as out_file:
                        for p_at in result:
                            out_file.write(str(p_at) + '\n')
                    print('finish evaluation')
                    print(
                        time.strftime('%Y-%m-%d %H:%M:%S',
                                      time.localtime(time.time())))

                    if result[1] + result[2] > best_result[1] + best_result[2]:
                        best_result = result
                        _save_path = _model.saver.save(
                            sess, conf["save_path"] + "model.ckpt." +
                            str(step / conf["save_step"]))
                        print("succ saving model in " + _save_path)
                        print(
                            time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.localtime(time.time())))
Example #24
def run(args):
    if args.gpu_id:
        setup_gpu(args.gpu_id, args.verbose)

    seed = args.seed or np.random.randint(1000)

    # documentation setup
    timestamp = args.timestamp or datetime.now().strftime("%Y%m%d%H%M%S")
    outputs_dir = Path(
        __file__
    ).parent / 'runs' / args.method / args.experiment_id / '{}{}_{}_{}'.format(
        args.source, args.target, seed, timestamp)
    checkpoints_dir = outputs_dir / 'checkpoints'
    checkpoints_dir.mkdir(parents=True, exist_ok=True)
    checkpoints_path = checkpoints_dir / 'cp-best.ckpt'
    tensorboard_dir = outputs_dir / 'logs'
    tensorboard_dir.mkdir(parents=True, exist_ok=True)
    config_path = outputs_dir / 'config.json'
    model_path = outputs_dir / 'model.json'
    report_path = outputs_dir / 'report.json'
    report_val_path = outputs_dir / 'report_validation.json'

    save_json(args.__dict__, config_path)

    features_config = load_json(Path('configs/features.json').absolute())

    # prepare data
    preprocess_input = {
        'vgg16':
        lambda x: keras.applications.vgg16.preprocess_input(x, mode='tf'),
        'resnet101v2':
        lambda x: keras.applications.resnet_v2.preprocess_input(
            x, mode='tf'
        ),  #NB: tf v 1.15 has a minor bug in keras_applications.resnet. Fix: change the function signature to "def preprocess_input(x, **kwargs):""
        'none':
        lambda x: x[features_config[args.features]["mat_key"]],
        **{k: lambda x: x
           for k in ['conv2', 'lenetplus']},
    }[args.model_base] or None

    if all([
            name in dsg.OFFICE_DATASET_NAMES
            for name in [args.source, args.target]
    ]):
        # office data
        INPUT_SHAPE = tuple(features_config[args.features]["shape"])
        CLASS_NAMES = dsg.office31_class_names()
        OUTPUT_SHAPE = len(CLASS_NAMES)
        ds = dsg.office31_datasets(source_name=args.source,
                                   target_name=args.target,
                                   preprocess_input=preprocess_input,
                                   shape=INPUT_SHAPE,
                                   seed=seed,
                                   features=args.features,
                                   test_as_val=args.test_as_val)

    elif all([
            name in dsg.DIGIT_DATASET_NAMES
            for name in [args.source, args.target]
    ]):
        INPUT_SHAPE = dsg.digits_shape(args.source,
                                       args.target,
                                       mode=args.resize_mode)
        CLASS_NAMES = dsg.digits_class_names()
        OUTPUT_SHAPE = len(CLASS_NAMES)
        ds = dsg.digits_datasets(
            source_name=args.source,
            target_name=args.target,
            num_source_samples_per_class=args.num_source_samples_per_class,
            num_target_samples_per_class=args.num_target_samples_per_class,
            num_val_samples_per_class=args.num_val_samples_per_class,
            seed=seed,
            test_as_val=args.test_as_val,
            input_shape=INPUT_SHAPE,
            standardize_input=args.standardize_input,
        )

    else:
        raise Exception(
            "The source and target datasets should come from either Office31 or Digits"
        )

    source_all_ds, source_all_size = ds['source']['full']
    source_train_ds, source_train_size = ds['source']['train']
    target_train_ds, target_train_size = ds['target']['train']
    target_val_ds, target_val_size = ds['target']['val']
    target_test_ds, target_test_size = ds['target']['test']
    test_size = target_test_size

    if args.test_as_val:
        target_val_ds = target_test_ds
        target_val_size = target_test_size

    val_ds, val_size = {
        **{
            k: lambda: (target_val_ds, target_val_size)
            for k in ['tune_source', 'tune_target']
        },
        **{
            k: lambda: dsg.da_pair_repeat_dataset(target_val_ds, target_val_size)
            for k in ['ccsa', 'dsne', 'dage', 'multitask']
        },
        **{
            k: lambda: dsg.da_pair_alt_repeat_dataset(target_val_ds, target_val_size)
            for k in ['dage_a']
        },
    }[args.method]()

    train_ds, train_size = {
        'tune_source': lambda: (source_all_ds, source_all_size),
        'tune_target': lambda: (target_train_ds, target_train_size),
        **{
            k: lambda: dsg.da_pair_dataset(source_ds=source_train_ds,
                                           target_ds=target_train_ds,
                                           ratio=args.ratio,
                                           shuffle_buffer_size=args.shuffle_buffer_size)
            for k in ['ccsa', 'dsne', 'dage', 'multitask']
        },
        **{
            k: lambda: dsg.da_pair_alt_dataset(source_ds=source_train_ds,
                                               target_ds=target_train_ds,
                                               ratio=args.ratio,
                                               shuffle_buffer_size=args.shuffle_buffer_size)
            for k in ['dage_a']
        },
    }[args.method]()

    # prep data
    test_ds = dsg.prep_ds(dataset=target_test_ds,
                          batch_size=args.batch_size,
                          shuffle_buffer_size=args.shuffle_buffer_size)
    val_ds = dsg.prep_ds(dataset=val_ds,
                         batch_size=args.batch_size,
                         shuffle_buffer_size=args.shuffle_buffer_size)
    train_ds = dsg.prep_ds_train(dataset=train_ds,
                                 batch_size=args.batch_size,
                                 shuffle_buffer_size=args.shuffle_buffer_size)

    # prepare optimizer
    optimizer = {
        'sgd':
        lambda: keras.optimizers.SGD(learning_rate=args.learning_rate,
                                     momentum=args.momentum,
                                     nesterov=True,
                                     clipvalue=10,
                                     decay=args.learning_rate_decay),
        'adam':
        lambda: keras.optimizers.Adam(learning_rate=args.learning_rate,
                                      beta_1=args.momentum,
                                      beta_2=0.999,
                                      amsgrad=False,
                                      clipvalue=10,
                                      decay=args.learning_rate_decay),
        'rmsprop':
        lambda: keras.optimizers.RMSprop(learning_rate=args.learning_rate,
                                         clipvalue=10,
                                         decay=args.learning_rate_decay),
    }[args.optimizer]()

    # prepare model
    model_base = {
        'vgg16':
        lambda: keras.applications.vgg16.VGG16(
            input_shape=INPUT_SHAPE, include_top=False, weights='imagenet'),
        'resnet101v2':
        lambda: keras.applications.resnet_v2.ResNet101V2(
            input_shape=INPUT_SHAPE, include_top=False, weights='imagenet'),
        'conv2':
        lambda: models.common.conv2_block(input_shape=INPUT_SHAPE,
                                          l2=args.l2,
                                          dropout=args.dropout / 2,
                                          batch_norm=args.batch_norm),
        'lenetplus':
        lambda: models.common.lenetplus_conv_block(input_shape=INPUT_SHAPE,
                                                   l2=args.l2,
                                                   dropout=args.dropout / 2,
                                                   batch_norm=args.batch_norm),
        'none':
        lambda i=keras.layers.Input(shape=INPUT_SHAPE): keras.models.Model(
            inputs=i, outputs=i),
    }[args.model_base]()

    aux_loss = {
        **{
            k: lambda: losses.dummy_loss
            for k in ['dummy', 'tune_source', 'tune_target', 'multitask']
        },
        'ccsa':
        lambda: losses.contrastive_loss(margin=args.connection_filter_param),
        'dsne':
        lambda: losses.dnse_loss(margin=args.connection_filter_param),
        'dage':
        lambda: losses.dage_loss(
            connection_type=args.connection_type,
            weight_type=args.weight_type,
            filter_type=args.connection_filter_type,
            penalty_filter_type=args.penalty_connection_filter_type,
            filter_param=args.connection_filter_param,
            penalty_filter_param=args.penalty_connection_filter_param),
    }[args.method]()

    (model, model_test) = {
        'single_stream':
        lambda: models.single_stream.model(model_base=model_base,
                                           input_shape=INPUT_SHAPE,
                                           output_shape=OUTPUT_SHAPE,
                                           num_unfrozen_base_layers=args.
                                           num_unfrozen_base_layers,
                                           optimizer=optimizer,
                                           dense_size=args.dense_size,
                                           embed_size=args.embed_size,
                                           l2=args.l2,
                                           dropout=args.dropout),
        'two_stream_pair_embeds':
        lambda: models.two_stream_pair_embeds.model(
            model_base=model_base,
            input_shape=INPUT_SHAPE,
            output_shape=OUTPUT_SHAPE,
            num_unfrozen_base_layers=args.num_unfrozen_base_layers,
            dense_size=args.dense_size,
            embed_size=args.embed_size,
            optimizer=optimizer,
            batch_size=args.batch_size,
            aux_loss=aux_loss,
            loss_alpha=args.loss_alpha,
            loss_weights_even=args.loss_weights_even,
            l2=args.l2,
            batch_norm=args.batch_norm,
            dropout=args.dropout),
    }[args.architecture]()

    val_freq = 3 if args.test_as_val else 1

    train = {
        'regular':
        partial(models.common.train,
                checkpoints_path=checkpoints_path,
                val_freq=val_freq),
        'flipping':
        partial(models.common.train,
                checkpoints_path=checkpoints_path,
                val_freq=val_freq,
                flipping=True),
        'batch_repeat':
        partial(models.common.train,
                checkpoints_path=checkpoints_path,
                batch_repeats=args.batch_repeats),
        'gradual_unfreeze':
        partial(models.common.train_gradual_unfreeze,
                model_base_name=args.model_base,
                checkpoints_path=checkpoints_path,
                architecture=args.architecture),
    }[args.training_regimen]

    if args.from_weights:
        weights_path = args.from_weights
        model.load_weights(str(weights_path))

    if args.verbose:
        model.summary()
        # keras.utils.plot_model( #type: ignore
        #     model,
        #     to_file=(Path(__file__).parent /'model.png').absolute(),
        #     show_shapes=True,
        #     show_layer_names=True,
        #     rankdir='TB',
        #     expand_nested=True,
        #     dpi=96
        # )

    with open(model_path, 'w') as f:
        f.write(model.to_json())

    monitor = {
        **{k: 'val_'
           for k in ['tune_source', 'tune_target']},
        **{
            k: 'val_preds_'
            for k in ['ccsa', 'dsne', 'dage', 'dage_a', 'multitask']
        },
    }[args.method] + args.monitor

    fit_callbacks = callbacks(checkpoints_path,
                              tensorboard_dir,
                              monitor=monitor,
                              verbose=args.verbose)

    augment = lambda x: x
    if args.augment:
        if args.features != 'images':
            raise ValueError('augment=1 is only allowed for features="images"')
        augment = {
            **{
                k: partial(dsg.augment,
                           batch_size=args.batch_size,
                           input_shape=INPUT_SHAPE)
                for k in ['tune_source', 'tune_target']
            },
            **{
                k: partial(dsg.augment_pair,
                           batch_size=args.batch_size,
                           input_shape=INPUT_SHAPE)
                for k in ['ccsa', 'dsne', 'dage', 'dage_a', 'multitask']
            },
        }[args.method]

    # perform training and test
    if 'train' in args.mode:
        start_time = timer()
        train(model=model,
              datasource=augment(train_ds),
              datasource_size=train_size,
              val_datasource=val_ds,
              val_datasource_size=val_size,
              epochs=args.epochs,
              batch_size=args.batch_size,
              callbacks=fit_callbacks,
              verbose=args.verbose)
        train_time = timer() - start_time
        if args.verbose:
            print("Completed training in {} seconds".format(train_time))

    result = 0

    if 'validate' in args.mode:
        result = evaluate(model=model_test,
                          test_dataset=val_ds,
                          test_size=val_size,
                          batch_size=args.batch_size,
                          report_path=report_val_path,
                          verbose=args.verbose,
                          target_names=CLASS_NAMES)

    if 'test' in args.mode:
        result = evaluate(model=model_test,
                          test_dataset=test_ds,
                          test_size=test_size,
                          batch_size=args.batch_size,
                          report_path=report_path,
                          verbose=args.verbose,
                          target_names=CLASS_NAMES)

    if args.delete_checkpoint:
        try:
            rmtree(str(checkpoints_dir.resolve()))
        except:
            pass

    return result['accuracy']  # type:ignore
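
The run() function above repeatedly selects behaviour through dictionaries of lambdas keyed by a config string (preprocessing, datasets, optimizer, model, losses, training regimen), so that only the chosen branch is ever constructed. A stripped-down illustration of the pattern using the optimizer block (assuming tensorflow.keras 2.x; the values are hypothetical):

from tensorflow import keras

def make_optimizer(name, learning_rate=1e-3):
    return {
        'sgd': lambda: keras.optimizers.SGD(learning_rate=learning_rate,
                                            momentum=0.9, nesterov=True),
        'adam': lambda: keras.optimizers.Adam(learning_rate=learning_rate),
        'rmsprop': lambda: keras.optimizers.RMSprop(learning_rate=learning_rate),
    }[name]()   # only the selected lambda is evaluated

optimizer = make_optimizer('adam')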
Example #25
def train(params, model):
    criterion = get_criterion(params)
    #unitary_parameters = get_unitary_parameters(model)
    if hasattr(model, 'get_params'):
        unitary_params, remaining_params = model.get_params()
    else:
        remaining_params = model.parameters()
        unitary_params = []

    if len(unitary_params) > 0:
        unitary_optimizer = RMSprop_Unitary(unitary_params,
                                            lr=params.unitary_lr)

    #remaining_parameters = get_remaining_parameters(model,unitary_parameters)
    optimizer = torch.optim.RMSprop(remaining_params, lr=params.lr)

    # Temp file for storing the best model
    temp_file_name = str(int(np.random.rand() * int(time.time())))
    params.best_model_file = os.path.join('tmp', temp_file_name)

    best_val_loss = 99999.0
    #    best_val_loss = -1.0
    for i in range(params.epochs):
        print('epoch: ', i)
        model.train()
        with tqdm(total=params.train_sample_num) as pbar:
            time.sleep(0.05)
            for _i, data in enumerate(
                    params.reader.get_data(iterable=True,
                                           shuffle=True,
                                           split='train'), 0):
                #                For debugging, please run the line below
                #                _i,data = next(iter(enumerate(params.reader.get_data(iterable = True, shuffle = True, split = 'train'),0)))

                b_inputs = [inp.to(params.device) for inp in data[:-1]]
                b_targets = data[-1].to(params.device)

                # Does not train if batch_size is 1, because batch normalization will crash
                if b_inputs[0].shape[0] == 1:
                    continue

                optimizer.zero_grad()
                if len(unitary_params) > 0:
                    unitary_optimizer.zero_grad()

                outputs = model(b_inputs)
                b_targets, outputs, loss = get_loss(params, criterion, outputs,
                                                    b_targets, b_inputs[-1])
                if np.isnan(loss.item()):
                    torch.save(model, params.best_model_file)
                    raise Exception('loss value overflow!')
                    #break
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), params.clip)
                optimizer.step()
                if len(unitary_params) > 0:
                    unitary_optimizer.step()

                # Compute Training Accuracy
                n_total = len(outputs)
                n_correct = (outputs.argmax(dim=-1) == b_targets).sum().item()
                train_acc = n_correct / n_total

                #Update Progress Bar
                pbar.update(params.batch_size)
                ordered_dict = {'acc': train_acc, 'loss': loss.item()}
                pbar.set_postfix(ordered_dict=ordered_dict)

        model.eval()

        #################### Compute Validation Performance##################
        val_output, val_target, val_mask = get_predictions(model,
                                                           params,
                                                           split='dev')

        val_target, val_output, val_loss = get_loss(params, criterion,
                                                    val_output, val_target,
                                                    val_mask)

        print('validation performance:')
        performances = evaluate(params, val_output, val_target)

        print('val_acc = {}, val_loss = {}'.format(performances['acc'],
                                                   val_loss))
        ##################################################################
        if val_loss < best_val_loss:
            torch.save(model, params.best_model_file)
            print('The best model up till now. Saved to File.')
            best_val_loss = val_loss
Example #26
def experiment_vae(args, train_loader, val_loader, test_loader, model, optimizer, dir, model_name='vae'):
    from utils.training import train_vae as train
    from utils.evaluation import evaluate_vae as evaluate

    # SAVING
    torch.save(args, dir + args.model_name + '.config')

    # best_model = model
    best_loss = 100000.
    e = 0
    train_loss_history = []
    train_re_history = []
    train_kl_history = []

    val_loss_history = []
    val_re_history = []
    val_kl_history = []

    time_history = []

    for epoch in range(1, args.epochs + 1):
        time_start = time.time()
        model, train_loss_epoch, train_re_epoch, train_kl_epoch = train(epoch, args, train_loader, model,
                                                                             optimizer)

        val_loss_epoch, val_re_epoch, val_kl_epoch = evaluate(args, model, train_loader, val_loader, epoch, dir, mode='validation')
        time_end = time.time()

        time_elapsed = time_end - time_start

        # appending history
        train_loss_history.append(train_loss_epoch), train_re_history.append(train_re_epoch), train_kl_history.append(
            train_kl_epoch)
        val_loss_history.append(val_loss_epoch), val_re_history.append(val_re_epoch), val_kl_history.append(
            val_kl_epoch)
        time_history.append(time_elapsed)

        # printing results
        print('Epoch: {}/{}, Time elapsed: {:.2f}s\n'
              '* Train loss: {:.2f}   (RE: {:.2f}, KL: {:.2f})\n'
              'o Val.  loss: {:.2f}   (RE: {:.2f}, KL: {:.2f})\n'
              '--> Early stopping: {}/{} (BEST: {:.2f})\n'.format(
            epoch, args.epochs, time_elapsed,
            train_loss_epoch, train_re_epoch, train_kl_epoch,
            val_loss_epoch, val_re_epoch, val_kl_epoch,
            e, args.early_stopping_epochs, best_loss
        ))

        # early-stopping
        if val_loss_epoch < best_loss:
            e = 0
            best_loss = val_loss_epoch
            # best_model = model
            print('->model saved<-')
            torch.save(model, dir + args.model_name + '.model')
        else:
            e += 1
            if epoch < args.warmup:
                e = 0
            if e > args.early_stopping_epochs:
                break

        # NaN
        if math.isnan(val_loss_epoch):
            break

    # FINAL EVALUATION
    best_model = torch.load(dir + args.model_name + '.model')
    test_loss, test_re, test_kl, test_log_likelihood, train_log_likelihood, test_elbo, train_elbo = evaluate(args, best_model, train_loader, test_loader, 9999, dir, mode='test')

    print('FINAL EVALUATION ON TEST SET\n'
          'LogL (TEST): {:.2f}\n'
          'LogL (TRAIN): {:.2f}\n'
          'ELBO (TEST): {:.2f}\n'
          'ELBO (TRAIN): {:.2f}\n'
          'Loss: {:.2f}\n'
          'RE: {:.2f}\n'
          'KL: {:.2f}'.format(
        test_log_likelihood,
        train_log_likelihood,
        test_elbo,
        train_elbo,
        test_loss,
        test_re,
        test_kl
    ))

    with open('vae_experiment_log.txt', 'a') as f:
        print('FINAL EVALUATION ON TEST SET\n'
          'LogL (TEST): {:.2f}\n'
          'LogL (TRAIN): {:.2f}\n'
          'ELBO (TEST): {:.2f}\n'
          'ELBO (TRAIN): {:.2f}\n'
          'Loss: {:.2f}\n'
          'RE: {:.2f}\n'
          'KL: {:.2f}'.format(
        test_log_likelihood,
        train_log_likelihood,
        test_elbo,
        train_elbo,
        test_loss,
        test_re,
        test_kl
        ), file=f)

    # SAVING
    torch.save(train_loss_history, dir + args.model_name + '.train_loss')
    torch.save(train_re_history, dir + args.model_name + '.train_re')
    torch.save(train_kl_history, dir + args.model_name + '.train_kl')
    torch.save(val_loss_history, dir + args.model_name + '.val_loss')
    torch.save(val_re_history, dir + args.model_name + '.val_re')
    torch.save(val_kl_history, dir + args.model_name + '.val_kl')
    torch.save(test_log_likelihood, dir + args.model_name + '.test_log_likelihood')
    torch.save(test_loss, dir + args.model_name + '.test_loss')
    torch.save(test_re, dir + args.model_name + '.test_re')
    torch.save(test_kl, dir + args.model_name + '.test_kl')
Example #27
def train(conf, _model):
    
    if conf['rand_seed'] is not None:
        np.random.seed(conf['rand_seed'])

    if not os.path.exists(conf['save_path']):
        os.makedirs(conf['save_path'])

    # load data
    print('starting loading data')
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
    train_data, val_data, test_data = pickle.load(open(conf["data_path"], 'rb')) 
    print('train:', len(train_data['y'])) 
    print('dev:', len(val_data['y']))  
    print('test:', len(test_data['y']))
    print('finish loading data')
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

    val_batches = reader.build_batches('train',val_data, conf)

    print("finish building test batches")
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

    # refine conf
    batch_num = int(len(train_data['y']) / conf["batch_size"])
    val_batch_num = len(val_batches["response"])

    conf["train_steps"] = conf["num_scan_data"] * batch_num
    conf["save_step"] = int(max(1, batch_num / 10))
    conf["print_step"] = int(max(1, batch_num / 100))

    print('configurations: %s' %conf)

    print('begin build model')
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

    _graph = _model.build_graph()
    print('build graph success')
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

    with tf.Session(graph=_graph) as sess:
        #writer = tf.summary.FileWriter("logs/", sess.graph) # for tensorboard 
        # summary writer
        '''
        train_summary_dir = os.path.join(conf["save_path"], "summaries", "train")
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

        dev_summary_dir = os.path.join(conf["save_path"], "summaries", "dev")
        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
        '''

        _model.init.run()

        if not conf["init_model"]:
            emb_feed = {_model.emb_placeholder: _model._word_embedding_init}
            sess.run(_model.emb_init, feed_dict=emb_feed)

        if conf["init_model"]:
            _model.saver.restore(sess, conf["init_model"])
            print("sucess init %s" %conf["init_model"])

        average_loss = 0.0
        batch_index = 0
        step = 0
        best_result = [0, 0, 0, 0]
        #best_result = [0, 0, 0, 0, 0, 0] # eva matrix: p1(2),p1(10),p2(10),p5(10)

        for step_i in range(conf["num_scan_data"]): # each epoch
            print('starting shuffle train data')
            shuffle_train = reader.unison_shuffle(train_data)
            train_batches = reader.build_batches('train',shuffle_train, conf)
            print('finish building train data')
            for batch_index in range(batch_num): # each batch

                feed = {
                    _model.turns: train_batches["turns"][batch_index], 
                    _model.tt_turns_len: train_batches["tt_turns_len"][batch_index],
                    _model.every_turn_len: train_batches["every_turn_len"][batch_index],
                    _model.response: train_batches["response"][batch_index], 
                    _model.response_len: train_batches["response_len"][batch_index],
                    _model.label: train_batches["label"][batch_index],
                    _model.dropout_keep_prob: conf["dropout_keep_prob"]
                }

                batch_index = (batch_index + 1) % batch_num

                _, curr_loss, summaries = sess.run([_model.g_updates, _model.loss, _model.train_summary_op], feed_dict = feed)

                # summary
                #train_summary_writer.add_summary(summaries, step)

                
                average_loss += curr_loss

                step += 1

                if step % conf["print_step"] == 0 and step > 0:
                    g_step, lr = sess.run([_model.global_step, _model.learning_rate])
                    print('step: %s, lr: %s' %(g_step, lr))
                    print("processed: [" + str(step * 1.0 / batch_num) + "] loss: [" + str(average_loss / conf["print_step"]) + "]")
                    average_loss = 0

                
                if step % conf["save_step"] == 0 and step > 0:
                    index = step // conf['save_step']
                    score_file_path = conf['save_path'] + 'score.' + str(index)
                    score_file = open(score_file_path, 'w')
                    print('save step: %s' %index)
                    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

                    for batch_index in range(val_batch_num):
                
                        feed = { 
                            _model.turns: val_batches["turns"][batch_index],
                            _model.tt_turns_len: val_batches["tt_turns_len"][batch_index],
                            _model.every_turn_len: val_batches["every_turn_len"][batch_index],
                            _model.response: val_batches["response"][batch_index],
                            _model.response_len: val_batches["response_len"][batch_index],
                            _model.label: val_batches["label"][batch_index],
                            _model.dropout_keep_prob: 1.0
                        }
                
                        scores, dev_loss, summaries = sess.run([_model.logits, _model.loss, _model.dev_summary_op], feed_dict = feed)

                        # summary
                        #dev_summary_writer.add_summary(summaries, step)

                    
                        for i in range(len(scores)): # logit, true_label
                            score_file.write(
                                str(scores[i]) + '\t' + 
                                str(val_batches["label"][batch_index][i]) + '\n')
                    score_file.close()

                    #write evaluation result
                    result = eva.evaluate(score_file_path)
                    result_file_path = conf["save_path"] + "result." + str(index)
                    with open(result_file_path, 'w') as out_file:
                        for p_at in result:
                            out_file.write(str(p_at) + '\n')
                    print('finish evaluation')
                    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

                    if result[1] + result[2] > best_result[1] + best_result[2]: # for ubuntu
                    #if result[2] + result[3] > best_result[2] + best_result[3]: # for douban
                        best_result = result
                        _save_path = _model.saver.save(sess, conf["save_path"] + "model.ckpt." + str(step // conf["save_step"]))
                        print("success saving model in " + _save_path)
                        print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
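The training loop above periodically dumps one "score<TAB>label" line per response candidate and hands the file to eva.evaluate. As a rough illustration of how such a file can be scored, the sketch below computes recall@k assuming groups of 10 candidates per context; the helper name and the group size are assumptions, and the real eva.evaluate may compute its metrics differently.

def recall_at_k_from_score_file(path, group_size=10, k_list=(1, 2, 5)):
    # read "score \t label" lines as written by the loop above
    with open(path) as f:
        pairs = [line.strip().split('\t') for line in f if line.strip()]
    scores = [float(s) for s, _ in pairs]
    labels = [int(float(l)) for _, l in pairs]

    hits = {k: 0 for k in k_list}
    n_groups = len(pairs) // group_size
    for g in range(n_groups):
        group = list(zip(scores[g * group_size:(g + 1) * group_size],
                         labels[g * group_size:(g + 1) * group_size]))
        ranked = sorted(group, key=lambda x: x[0], reverse=True)
        for k in k_list:
            # a hit if the gold response appears among the top-k ranked candidates
            if any(label == 1 for _, label in ranked[:k]):
                hits[k] += 1
    return [hits[k] / max(n_groups, 1) for k in k_list]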
Beispiel #28
0
def test(conf, _model):

    if not os.path.exists(conf['save_path']):
        os.makedirs(conf['save_path'])

    # load data
    print('starting loading data')
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    train_data, val_data, test_data = pickle.load(open(conf["data_path"],
                                                       'rb'))
    print('finish loading data')

    test_batches = reader.build_batches(test_data, conf)

    print("finish building test batches")
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    # refine conf
    test_batch_num = len(test_batches["response"])

    print('configurations: %s' % conf)

    _graph = _model.build_graph()
    print('build graph success')
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    with tf.Session(graph=_graph) as sess:
        #_model.init.run();
        _model.saver.restore(sess, conf["init_model"])
        print("sucess init %s" % conf["init_model"])

        batch_index = 0
        step = 0

        score_file_path = conf['save_path'] + 'score.test'
        score_file = open(score_file_path, 'w')

        print('starting test')
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
        for batch_index in range(test_batch_num):

            feed = {
                _model.turns: test_batches["turns"][batch_index],
                _model.tt_turns_len: test_batches["tt_turns_len"][batch_index],
                _model.every_turn_len:
                test_batches["every_turn_len"][batch_index],
                _model.response: test_batches["response"][batch_index],
                _model.response_len: test_batches["response_len"][batch_index],
                _model.label: test_batches["label"][batch_index]
            }

            scores = sess.run(_model.logits, feed_dict=feed)

            for i in range(conf["batch_size"]):
                score_file.write(
                    str(scores[i]) + '\t' +
                    str(test_batches["label"][batch_index][i]) + '\n')
                #str(sum(test_batches["every_turn_len"][batch_index][i]) / test_batches['tt_turns_len'][batch_index][i]) + '\t' +
                #str(test_batches['tt_turns_len'][batch_index][i]) + '\n')

        score_file.close()
        print('finish test')
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

        #write evaluation result
        result = eva.evaluate(score_file_path)
        result_file_path = conf["save_path"] + "result.test"
        with open(result_file_path, 'w') as out_file:
            for p_at in result:
                out_file.write(str(p_at) + '\n')
        print('finish evaluation')
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
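The scoring loop above iterates over conf["batch_size"] entries per batch, which assumes every test batch is full; a padded or short final batch could be mis-indexed. A small defensive variant (illustrative only, helper name invented) pairs scores with labels explicitly:

def write_scores(score_file, scores, labels):
    # zip() stops at the shorter sequence, so padded tails are never indexed
    for score, label in zip(scores, labels):
        score_file.write('{}\t{}\n'.format(score, label))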
Beispiel #29
0
def test(conf, _model):
    
    if not os.path.exists(conf['save_path']):
        os.makedirs(conf['save_path'])

    # load data
    print('starting loading data')
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
    data_collections = pickle.load(open(conf["data_path"], 'rb'))
    print('finish loading data')

    file_names = ["train.mix", "valid.mix", "test.mix"]

    test_data = data_collections[file_names.index("test.mix")]

    score_test = "score.test"


    test_batches = reader.build_batches(test_data, conf)

    print("finish building test batches")
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

    # refine conf
    test_batch_num = len(test_batches["response"])

    print('configurations: %s' %conf)


    _graph = _model.build_graph()
    print('build graph success')
    print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))

    from tensorflow.python import debug as tf_debug

    with tf.Session(graph=_graph) as sess:

        _model.init.run()
        _model.saver.restore(sess, conf["init_model"])
        print("sucess init %s" %conf["init_model"])

        test_type = conf["train_type"]
        logits = _model.trainops[test_type]["logits"]

        score_file_path = conf['save_path'] + '/' + score_test
        score_file = open(score_file_path, 'w')

        print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())), 'starting test')
        for batch_index in range(test_batch_num):
            feed = {
                _model.turns1: test_batches["turns1"][batch_index],
                _model.turns2: test_batches["turns2"][batch_index],
                _model.turnsf: test_batches["turnsf"][batch_index],
                _model.tt_turns_len1: test_batches["tt_turns_len1"][batch_index],
                _model.every_turn_len1: test_batches["every_turn_len1"][batch_index],
                _model.tt_turns_len2: test_batches["tt_turns_len2"][batch_index],
                _model.every_turn_len2: test_batches["every_turn_len2"][batch_index],
                _model.tt_turns_lenf: test_batches["tt_turns_lenf"][batch_index],
                _model.every_turn_lenf: test_batches["every_turn_lenf"][batch_index],
                _model.response: test_batches["response"][batch_index], 
                _model.response_len: test_batches["response_len"][batch_index],
                _model.label: test_batches["label"][batch_index],
                _model.turnsa: test_batches["turnsa"][batch_index],
                _model.turnsa_len: test_batches["turnsa_len"][batch_index],
                _model.turnsq: test_batches["turnsq"][batch_index],
                _model.turnsq_len: test_batches["turnsq_len"][batch_index],
                _model.keep_rate: 1.0,
            }

            scores = sess.run(logits, feed_dict = feed)

            for i in range(len(scores)):
                score_file.write(
                    str(scores[i]) + '\t' + 
                    str(test_batches["label"][batch_index][i]) + '\n')

        score_file.close()
        print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())), 'finish test')

        #write evaluation result
        result = eva.evaluate(score_file_path)
        print("MRR: {:01.4f} P2@1 {:01.4f} R@1 {:01.4f} r@2 {:01.4f} r@5 {:01.4f}".format(*result))
        print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())), 'finish evaluation')
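The feed dictionaries above list one placeholder per line. A compact alternative, sketched here under the assumption that placeholder names match the batch-dictionary keys, builds the feed_dict from a name-to-placeholder mapping (the helper name is invented):

def build_feed(placeholders, batches, batch_index, extra=None):
    # placeholders: {"turns1": _model.turns1, ...}; batches: dict of per-key batch lists
    feed = {ph: batches[name][batch_index] for name, ph in placeholders.items()}
    if extra:
        feed.update(extra)   # e.g. {_model.keep_rate: 1.0} at test time
    return feed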
Beispiel #30
0
        # Y_fit = Y_query
        train_occ = np.asarray(Y_query.sum(axis=1)).flatten()

        # convert query playlists to list
        query_playlists = tolist(playlists_idx,
                                 songs_idx,
                                 position,
                                 idx2song,
                                 subset=query_idx)

        # predict song-playlist probabilities
        start = time.time()
        cont_output = compute_membership_fix(playlists=query_playlists,
                                             idx2song=idx2song,
                                             features=features,
                                             my_net=my_net,
                                             random_state=rng)
        print('\nTime predicting: {} sec.'.format(round(
            time.time() - start, 4)))
        # cont_output = np.random.rand(len(idx2song), len(query_playlists))

        # evaluate the continuations
        evaluate(scores=[cont_output.T],
                 targets=[Y_cont.T.tocsr()],
                 queries=[Y_query.T.tocsr()],
                 train_occ=[train_occ],
                 k_list=[10, 30, 100],
                 ci=args.ci,
                 song_occ=args.song_occ,
                 metrics_file=args.metrics_file)
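The train_occ line above reduces a sparse matrix to per-song occurrence counts. A tiny self-contained example of that reduction, assuming rows index songs and columns index query playlists (the matrix values here are invented):

import numpy as np
from scipy import sparse

# 3 songs x 3 playlists; entry (i, j) = 1 if song i occurs in playlist j
Y_query = sparse.csr_matrix(np.array([[1, 0, 1],
                                      [0, 0, 0],
                                      [1, 1, 1]]))
train_occ = np.asarray(Y_query.sum(axis=1)).flatten()
print(train_occ)   # [2 0 3] -> the second song never occurs in the query playlists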
Beispiel #31
0
def main(model_name):
    dp = DataProvider()
    if model_name == "sts":
        model, optimizer, scheduler, grad_clip, step_only_at_eval = create_sts_model(
            dp.SRC, dp.TGT)
    elif model_name == "transformer":
        model, optimizer, scheduler, grad_clip, step_only_at_eval = create_transformer_model(
            Transformer, dp.SRC, dp.TGT)
    elif model_name == "aspect_augmented_transformer":
        model, optimizer, scheduler, grad_clip, step_only_at_eval = create_transformer_model(
            AspectAugmentedTransformer, dp.SRC, dp.TGT)
    elif model_name == "multi_head_aspect_augmented_transformer":
        model, optimizer, scheduler, grad_clip, step_only_at_eval = create_transformer_model(
            MultiHeadAspectAugmentedTransformer, dp.SRC, dp.TGT)
    elif model_name == "syntax_infused_transformer":
        model, optimizer, scheduler, grad_clip, step_only_at_eval = create_transformer_model(
            SyntaxInfusedTransformer, dp.SRC, dp.TGT)
    elif model_name == "bert_freeze_input_transformer":
        model, optimizer, scheduler, grad_clip, step_only_at_eval = create_transformer_model(
            BertFreezeTransformer, dp.SRC, dp.TGT)
    elif model_name == "copy":
        model, optimizer, scheduler, grad_clip, step_only_at_eval = create_copy_model(
            dp.SRC, dp.TGT)
    else:
        raise ValueError("Model name {} is not defined.".format(model_name))
    if not os.path.exists("../.checkpoints/"):
        os.mkdir("../.checkpoints/")
    training_evaluation_results = []
    torch.save(
        {
            'model': model,
            'field_src': dp.SRC,
            'field_tgt': dp.TGT,
            'training_evaluation_results': training_evaluation_results
        }, "../.checkpoints/" + cfg.checkpoint_name)

    if bool(cfg.debug_mode):
        evaluate(dp.val_iter, dp, model, dp.processed_data.addresses.val.src,
                 dp.processed_data.addresses.val.tgt, "INIT")
    best_val_score = 0.0
    assert cfg.update_freq > 0, "update_freq must be a positive integer"
    for epoch in range(int(cfg.n_epochs)):
        if epoch == int(cfg.init_epochs) and model_name == "sts":
            optimizer, scheduler = get_a_new_optimizer(cfg.optim,
                                                       cfg.learning_rate,
                                                       model.parameters())
        all_loss = 0.0
        batch_count = 0.0
        all_perp = 0.0
        all_tokens_count = 0.0
        if epoch < 2:
            # after the first iteration it does not need recalculation
            val_indices = [
                int(dp.size_train * x / float(cfg.val_slices))
                for x in range(1, int(cfg.val_slices))
            ]
        ds = tqdm(dp.train_iter, total=dp.size_train, dynamic_ncols=True)
        optimizer.zero_grad()
        for ind, instance in enumerate(ds):
            if instance.src[0].size(0) < 2:
                continue
            pred, _, lss, decoded_length, n_tokens = model(
                instance.src,
                instance.trg,
                test_mode=False,
                **instance.data_args)
            itm = lss.item()
            all_loss += itm
            all_tokens_count += n_tokens
            all_perp += math.exp(itm / max(n_tokens, 1.0))
            batch_count += 1.0
            lss /= (max(decoded_length, 1) * cfg.update_freq)
            lss.backward()
            if grad_clip:
                nn.utils.clip_grad_norm_(model.parameters(),
                                         float(cfg.max_grad_norm))
            if ind % cfg.update_freq == 0:
                """Implementation of gradient accumulation as suggested in https://arxiv.org/pdf/1806.00187.pdf"""
                optimizer.step()
                if not step_only_at_eval:
                    scheduler.step()
                optimizer.zero_grad()
            current_perp = all_perp / batch_count
            if current_perp < 1500:
                ds.set_description(
                    "Epoch: {}, Average Loss: {:.2f}, Average Perplexity: {:.2f}"
                    .format(epoch, all_loss / all_tokens_count, current_perp))
            else:
                ds.set_description("Epoch: {}, Average Loss: {:.2f}".format(
                    epoch, all_loss / all_tokens_count))
            if ind in val_indices:
                val_l, val_bleu = evaluate(dp.val_iter, dp, model,
                                           dp.processed_data.addresses.val.src,
                                           dp.processed_data.addresses.val.tgt,
                                           str(epoch))
                training_evaluation_results.append(val_bleu)
                if val_bleu > best_val_score:
                    torch.save(
                        {
                            'model':
                            model,
                            'field_src':
                            dp.SRC,
                            'field_tgt':
                            dp.TGT,
                            'training_evaluation_results':
                            training_evaluation_results
                        }, "../.checkpoints/" + cfg.checkpoint_name)
                    best_val_score = val_bleu
                if step_only_at_eval:
                    scheduler.step(val_bleu)
        dp.size_train = ind + 1

    if best_val_score > 0.0:
        print(
            "Loading the best validated model with validation bleu score of {:.2f}"
            .format(best_val_score))
        saved_obj = torch.load("../.checkpoints/" + cfg.checkpoint_name,
                               map_location=lambda storage, loc: storage)
        model = saved_obj['model'].to(device)
        # it might not correctly overwrite the vocabulary objects
        SRC = saved_obj['field_src']
        TGT = saved_obj['field_tgt']
        dp.replace_fields(SRC, TGT)
    evaluate(dp.val_iter, dp, model, dp.processed_data.addresses.val.src,
             dp.processed_data.addresses.val.tgt, "LAST")
Beispiel #32
0
import argparse

description = """   This script assumes to be run at 'evaluation' directory.
                    It requires 'lodbrok1.h5' and 'spambrainz_dataset_eval.pickle'
                    to run. The purpose of the script is to show the performance
                    of lodbrok model against evaluation dataset. The output is how
                    well the model performed over evaluation dataset"""

parser = argparse.ArgumentParser(description=description)

args = parser.parse_args()
import sys

sys.path.append("..")
from numpy import loadtxt
from tensorflow.keras.models import load_model
import keras
from utils.evaluation import evaluate, print_stats
# load model
model = load_model('../models/weights/lodbrok1.h5')
# summarize model
model.summary()
eval_results = evaluate("../models/weights/lodbrok1.h5",
                        "../data/spambrainz_dataset_eval.pickle")
print_stats(eval_results)
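The script above loads the saved Keras model and delegates scoring to utils.evaluation.evaluate. The sketch below shows what such a helper might do internally; the (features, labels) layout of the pickled evaluation set is an assumption, and the real spambrainz data may be structured differently.

import pickle
import numpy as np
from tensorflow.keras.models import load_model

def evaluate_sketch(model_path, dataset_path):
    model = load_model(model_path)
    with open(dataset_path, 'rb') as f:
        features, labels = pickle.load(f)          # assumed (features, labels) layout
    # threshold the sigmoid outputs at 0.5 to get hard spam/non-spam predictions
    predictions = (model.predict(features) > 0.5).astype(int).ravel()
    accuracy = float(np.mean(predictions == np.asarray(labels).ravel()))
    return {'accuracy': accuracy, 'n_samples': len(labels)}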
Beispiel #33
0
def train(conf, _model):

    if conf['rand_seed'] is not None:
        np.random.seed(conf['rand_seed'])

    if not os.path.exists(conf['save_path']):
        os.makedirs(conf['save_path'])

    # config display
    print('configurations: %s' % conf)

    # Data Generate
    with tf.device('/gpu:1'):
        dg = DataGenerator(conf)

    # refine conf
    train_batch_num = int(dg.train_data_size / conf["batch_size"])
    val_batch_num = int(dg.dev_data_size / conf["batch_size"])

    conf["train_steps"] = conf["num_scan_data"] * train_batch_num
    conf["save_step"] = int(max(1, train_batch_num / 10))
    conf["print_step"] = int(max(1, train_batch_num / 100))

    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
          ' : Build graph')
    _graph = _model.build_graph()
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
          ' : Build graph success')

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(graph=_graph, config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        if conf["init_model"]:
            _model.saver.restore(sess, conf["init_model"])
            print("sucess init %s" % conf["init_model"])

        average_loss = 0.0
        batch_index = 0
        step = 0
        best_result = [0, 0, 0, 0]

        for step_i in range(conf["num_scan_data"]):
            for batch_index in range(train_batch_num):
                with tf.device('/gpu:1'):
                    turns, turn_num, turn_len, response, response_len, label = dg.train_data_generator(
                        batch_index)
                feed = {
                    _model.turns: turns,
                    _model.turn_num: turn_num,
                    _model.turn_len: turn_len,
                    _model.response: response,
                    _model.response_len: response_len,
                    _model.label: label
                }
                batch_index = (batch_index + 1) % train_batch_num

                _, curr_loss = sess.run([_model.opt, _model.de_loss],
                                        feed_dict=feed)

                average_loss += curr_loss

                step += 1

                if step % conf["print_step"] == 0 and step > 0:
                    print(
                        time.strftime('%Y-%m-%d %H:%M:%S',
                                      time.localtime(time.time())),
                        " processed: [" + str(step * 1.0 / train_batch_num) +
                        "] loss: [" + str(average_loss / conf["print_step"]) +
                        "]")
                    average_loss = 0

                if step % conf["save_step"] == 0 and step > 0:
                    index = step // conf['save_step']
                    dev_score_file_path = conf[
                        'save_path'] + 'dev_score.' + str(index)
                    dev_score_file = open(dev_score_file_path, 'w')
                    print(
                        time.strftime(' %Y-%m-%d %H:%M:%S',
                                      time.localtime(time.time())),
                        '  Save step: %s' % index)

                    # calculate dev score
                    for batch_index in range(val_batch_num):
                        with tf.device('/gpu:1'):
                            turns, turn_num, turn_len, response, response_len, label = dg.dev_data_generator(
                                batch_index)
                        feed = {
                            _model.turns: turns,
                            _model.turn_num: turn_num,
                            _model.turn_len: turn_len,
                            _model.response: response,
                            _model.response_len: response_len,
                            _model.label: label
                        }

                        scores = sess.run(_model.de_logits, feed_dict=feed)

                        for i in range(conf["batch_size"]):
                            for j in range(conf['options_num']):
                                if j == label[i]:
                                    lab = 1
                                else:
                                    lab = 0
                                dev_score_file.write(
                                    str(scores[i][j]) + '\t' + str(lab) + '\n')
                    dev_score_file.close()

                    #write evaluation result
                    dev_result = eva.evaluate(dev_score_file_path)
                    dev_result_file_path = conf[
                        "save_path"] + "dev_result." + str(index)
                    with open(dev_result_file_path, 'w') as out_file:
                        for p_at in dev_result:
                            out_file.write(str(p_at) + '\n')
                    print('finish dev evaluation')
                    print(
                        time.strftime('%Y-%m-%d %H:%M:%S',
                                      time.localtime(time.time())))

                    if dev_result[1] + dev_result[2] > best_result[
                            1] + best_result[2]:
                        best_result = dev_result
                        _save_path = _model.saver.save(
                            sess, conf["save_path"] + "model.ckpt." +
                            str(step / conf["save_step"]))
                        print("succ saving model in " + _save_path)
                        print(
                            time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.localtime(time.time())))
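The dev-scoring loop above expands an N-way multiple-choice label into one binary line per option (1 for the gold option, 0 otherwise) so the usual score-file evaluation can be reused. A compact version of that expansion (illustrative only, helper name invented):

def expand_options(scores_row, gold_index):
    # scores_row: per-option scores for one instance; gold_index: index of the true option
    return [(score, 1 if j == gold_index else 0) for j, score in enumerate(scores_row)]

# e.g. expand_options([0.2, 0.7, 0.1], 1) -> [(0.2, 0), (0.7, 1), (0.1, 0)]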
Beispiel #34
0
        else:
            # why Y_query?
            # sim contains song-to-song similarities derived from fit playlists
            # Y_query contains the query playlists where songs have occurred
            # -> the product assigns high score to songs similar to songs that
            # have occurred in the query playlists
            cont_output = sim.dot(Y_query).toarray()

        # factor by popularity if required
        if args.pop:
            cont_output *= train_occ[:, np.newaxis]

        # mask song-playlist continuation pairs involving unknown songs
        mask_array_rows(Y_cont, np.where(train_occ == 0)[0])

        # append arrays re-shaping for evaluation
        cont_output_l.append(cont_output.T)
        Y_cont_l.append(Y_cont.T.tocsr())
        Y_query_l.append(sparse.csr_matrix(Y_query).T.tocsr())
        train_occ_l.append(train_occ)

    # evaluate the continuations
    evaluate(scores=cont_output_l,
             targets=Y_cont_l,
             queries=Y_query_l,
             train_occ=train_occ_l,
             k_list=[10, 30, 100],
             ci=args.ci,
             song_occ=args.song_occ,
             metrics_file=args.metrics_file)
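The masking step above removes continuation targets for songs that never occur at training time (train_occ == 0) so they cannot be counted against the model. mask_array_rows is assumed to zero out the given rows; the stand-in below does the same on a SciPy sparse matrix with invented values.

import numpy as np
from scipy import sparse

def mask_rows(Y, rows):
    Y = Y.tolil()              # LIL format allows cheap row assignment
    for r in rows:
        Y[r, :] = 0
    return Y.tocsr()

# 3 songs x 2 continuation playlists; song 1 was never seen at training time
Y_cont = sparse.csr_matrix(np.array([[1, 0], [0, 1], [1, 1]]))
train_occ = np.array([2, 0, 3])
Y_cont = mask_rows(Y_cont, np.where(train_occ == 0)[0])
print(Y_cont.toarray())        # row 1 (the unseen song) is now all zeros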