Example #1
def submit(request, id):
    # Handle the submission form (adding a submission to a problem)
    if request.method == 'POST':
        form = SubmitSolutionForm(request.POST, request.FILES)
        filename = request.FILES['submission_code'].name

        # Only C++ and Python sources are accepted
        if not filename.endswith(('.cpp', '.py')):
            messages.error(request, 'Wrong file type')

        elif form.is_valid():
            cur_user = request.user
            cur_prob = problem.objects.get(id=id)

            # Create the submission instance from the form but don't save it yet
            submission = form.save(commit=False)
            submission.author = cur_user
            submission.problem_code = cur_prob
            submission.save()

            # Update per-user and per-problem counters
            cur_user.profile.problems_tried += 1
            cur_prob.total_submissions += 1

            # Judge the submission; 'AC' (accepted) means all tests passed
            verdict = evaluate(submission.submission_code, id)
            if verdict == 'AC':
                cur_user.profile.problems_solved += 1
                cur_prob.successful_submissions += 1

            cur_prob.save()
            cur_user.profile.save()
            messages.success(request, 'Added submission')
        else:
            messages.error(request, "Can't add submission")
    return render(request, 'problems/add_submission.html', {'form': SubmitSolutionForm()})
Example #2
def train(args):
    # Get model
    model = models.__dict__[args.model](args)
    if args.ckpt_path:
        model = ModelSaver.load_model(model, args.ckpt_path, args.gpu_ids, is_training=True)
    model = model.to(args.device)
    model.train()

    # Get loader, logger, and saver
    train_loader, val_loader = get_data_loaders(args)
    logger = TrainLogger(args, model, dataset_len=len(train_loader.dataset))
    saver = ModelSaver(args.save_dir, args.max_ckpts, metric_name=args.metric_name,
                       maximize_metric=args.maximize_metric, keep_topk=True)

    # Train
    while not logger.is_finished_training():
        logger.start_epoch()
        for batch in train_loader:
            logger.start_iter()

            # Train over one batch
            model.set_inputs(batch['src'], batch['tgt'])
            model.train_iter()

            logger.end_iter()

            # Evaluate
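            # global_step is assumed to advance by batch_size each iteration, so this
            # check triggers evaluation roughly once every iters_per_eval examples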
            if logger.global_step % args.iters_per_eval < args.batch_size:
                criteria = {'MSE_src2tgt': mse, 'MSE_tgt2src': mse}
                stats = evaluate(model, val_loader, criteria)
                logger.log_scalars({'val_' + k: v for k, v in stats.items()})
                saver.save(logger.global_step, model,
                           stats[args.metric_name], args.device)

        logger.end_epoch()
Example #3
    def act(self, state: np.ndarray):
        self.simulation_env.reset(state, self.player)
        valid_actions = self.simulation_env.get_valid_actions().nonzero()[0]
        values = np.zeros_like(valid_actions, dtype=float)
        for idx, action in enumerate(valid_actions):
            self.simulation_env.step(action)
            next_state = self.simulation_env.get_state()
            value = evaluate(next_state)
            # Multiply by self.player (-1 or 1) so we can always choose the argmax
            values[idx] = value * self.player
            # Restore the original state before evaluating the next action
            self.simulation_env.reset(state, self.player)
        return valid_actions[np.argmax(values)]
Example #4
def do_evaluate(args):
    from evaluation.evaluate import parse_file, evaluate, evaluate_output, print_results, print_relevant
    variables = parse_file(args.file)

    if args.print_relevant:
        print_relevant(**variables)
    else:
        if args.multisearch_result:
            # Skip the query-running step
            with open(args.multisearch_result) as fp:
                output = fp.read()
            results = evaluate_output(output, **variables)

        else:
            # Run the queries
            results = evaluate(**variables)
        print_results(results, args.format)
Example #5
def postprocessing_with_evaluation_block1(x_test, y_test, timepoints,
                                          test_file_list, x_challenge,
                                          challenge_file_list, model, param):
    """Post processes test and challenge files. Evaluates new PSDS on test set
    and returns post processed test and prediction data
    """
    prediction = model.predict(x_test)
    challenge_prediction = model.predict(x_challenge)
    if param['post_processing'] == 'no':
        print('No post processing used')
        return prediction, challenge_prediction
    elif param['post_processing'] == 'fill':
        print('Filling post processing used.')
        timethres = param['post_timethres']
        noisethres = param['post_noisethres']
        base = param['post_base']
        post_processed_test = np.array([
            postProcess(pred,
                        timepoints,
                        timeThresh=timethres,
                        noiseThresh=noisethres,
                        base=base) for pred in prediction
        ])
        post_processed_prediction = np.array([
            postProcess(pred,
                        timepoints,
                        timeThresh=timethres,
                        noiseThresh=noisethres,
                        base=base) for pred in challenge_prediction
        ])
        plot_confusion_matrix(y_test, post_processed_test)
        # calculate psds for test set
        df_test_pred = getPredictionAsSequenceDF(post_processed_test,
                                                 timepoints, test_file_list)
        pred_csv_path = param['prediction_path'] + 'test_pred_postprocessed.csv'
        df_test_pred.to_csv(pred_csv_path, index=False)
        test_csv_path = param['prediction_path'] + 'test_ref_current.csv'
        post_processed_psds_info = evaluate.evaluate(pred_csv_path,
                                                     test_csv_path)
        print('PSDS', post_processed_psds_info)
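    # Note: these return values are only defined when post_processing == 'fill'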
    return post_processed_test, post_processed_prediction, post_processed_psds_info[0]
Example #6
def evaluation_block1(X_test, Y_test, timepoints, testFileList, model,
                      eval_param):
    """Evaluates current model on test data from dev set. Also calculates
    PSDS score."""
    prediction = model.predict(X_test)
    scores_list = model.evaluate(X_test, Y_test)
    print('')
    print('Evaluation:')
    print('Loss, MAE, Accuracy', scores_list)
    plot_confusion_matrix(Y_test, prediction)
    prediction_df = getPredictionAsSequenceDF(prediction, timepoints,
                                              testFileList)
    pred_csv_path = eval_param['prediction_path'] + 'test_pred_model.csv'
    prediction_df.to_csv(pred_csv_path, index=False)
    test_csv_path = eval_param['prediction_path'] + 'test_ref_current.csv'
    psds_info = evaluate.evaluate(pred_csv_path, test_csv_path)
    print('PSDS', psds_info)
    print('')
    return scores_list, psds_info[0]
Example #7
import sys
sys.path.append('../')
import logging
import time

from evaluation.evaluate import evaluate

if __name__ == '__main__':
    logger = logging.getLogger('signature')

    logfile = '../../data/log/%d_learnSMF.log' % int(time.time())
    logging.basicConfig(filename=logfile, format='%(asctime)s : %(name)-12s: %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.DEBUG)
    logger.info("running %s" % ' '.join(sys.argv))
    
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(asctime)s : %(name)-12s: %(levelname)-8s %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    logger.addHandler(console)
    

    #path = '../../data/restaurants/'
    path = '../../data/beautyspa/'
    evaluate(path)
Example #8
                    for threshold2 in [0.72]:
                        with Timer('Second step'):
                            edges_new = []
                            edges = np.array(edges)
                            share_num = common_link[
                                edges[:, 0].tolist(),
                                edges[:, 1].tolist()].tolist()[0]
                            edges = edges.tolist()

                            for i, (u, v) in enumerate(edges):
                                # Keep the edge only if both endpoints have links and the
                                # shared-link ratio exceeds the threshold
                                if link_num[u] != 0 and link_num[v] != 0:
                                    if max(share_num[i] / link_num[u],
                                           share_num[i] / link_num[v]) > threshold2:
                                        edges_new.append([u, v])
                                # Progress indicator for very large edge lists
                                if i % 10_000_000 == 0:
                                    print(i)

                        with Timer('Last step'):
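                            # 584013 is presumably the total number of nodes in the graph (dataset-specific constant)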
                            pre_labels = edge_to_connected_graph(
                                edges_new, 584013)
                        gt_labels = np.load('./pretrained_model/gt_labels.npy')
                        print('the threshold1 is:{}'.format(threshold1))
                        print('the threshold2 is:{}'.format(threshold2))
                        evaluate(gt_labels, pre_labels, 'pairwise')
                        evaluate(gt_labels, pre_labels, 'bcubed')
                        evaluate(gt_labels, pre_labels, 'nmi')
Example #9
import sys

sys.path.append("../")
import logging
import time

from evaluation.evaluate import evaluate
from params.params import path

if __name__ == "__main__":
    logger = logging.getLogger("signature")

    logfile = "../../data/log/%d_learnSMF.log" % int(time.time())
    logging.basicConfig(filename=logfile, format="%(asctime)s : %(name)-12s: %(levelname)s : %(message)s")
    logging.root.setLevel(level=logging.DEBUG)
    logger.info("running %s" % " ".join(sys.argv))

    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    # set a format which is simpler for console use
    formatter = logging.Formatter("%(asctime)s : %(name)-12s: %(levelname)-8s %(message)s")
    # tell the handler to use this format
    console.setFormatter(formatter)
    logger.addHandler(console)

    evaluate(path, userProfileThres=5)