예제 #1
0
def main(cmd_opt):
    """Entry point: build the word2vec class-embedding matrix and either
    train or evaluate a CifarNet model according to *cmd_opt*.

    In zero-shot training mode the class count is fixed at 8 (the
    non-held-out classes); otherwise it comes from the command line.
    """
    if cmd_opt.zero_shot and cmd_opt.train:
        num_classes = 8
    else:
        num_classes = cmd_opt.numClasses
    # Create the image output directory on demand.
    if cmd_opt.imageDir is not None and not os.path.exists(cmd_opt.imageDir):
        os.mkdir(cmd_opt.imageDir)
    cifar_loader = CifarLoader(cmd_opt.dataRoot,
                               cmd_opt.pklRoot,
                               cmd_opt.tfRecordRoot,
                               zero_shot=cmd_opt.zero_shot)

    # Load the pickled {class name -> word2vec vector} mapping.
    with open(cmd_opt.pklRoot, 'rb') as pickle_file:
        word2vec_dict = pkl.load(pickle_file)
    word2vec_matrix = np.zeros([num_classes, word2vec_embedding_size])
    # NOTE(review): dict.iteritems() is Python 2 only.
    for key, value in word2vec_dict.iteritems():
        if cmd_opt.zero_shot and cmd_opt.train:
            # Held-out categories get no embedding row during zero-shot training.
            if key not in heldout_categories.keys():
                word2vec_matrix[categories[key], :] = value
        else:
            word2vec_matrix[categories[key], :] = value
    with tf.Graph().as_default():
        if cmd_opt.train:
            train(cmd_opt, word2vec_matrix, cifar_loader, num_classes,
                  image_size, word2vec_embedding_size, "CifarNet",
                  cmd_opt.validationExamples)
        else:
            eval(cmd_opt, word2vec_matrix, cifar_loader, num_classes,
                 image_size, word2vec_embedding_size, cmd_opt.testExamples,
                 cmd_opt.imageDir)
예제 #2
0
def train_predict(df_r, label_r, users_r, df_t, label_t, users_t, samprob = 0.02, params = {}):
    '''
    Train a classifier on an under-sampled training set and return eval
    scores on both the training and test sets.
    (Original docstring in Chinese: "train the model and give test results".)

    df_r/label_r/users_r: training features, labels and user ids.
    df_t/label_t/users_t: test features, labels and user ids.
    samprob: sampling probability forwarded to underSample -- TODO confirm semantics.
    params:  keyword args for RandomForestClassifier.
             NOTE(review): mutable default argument, shared across calls.

    Returns (score, model) where score = {'train': ..., 'test': ...}.
    '''
    score = {}
    
    # Re-balance the training data before fitting.
    df, label = underSample(df_r, label_r, prob = samprob)
    model = RandomForestClassifier(**params)#RandomForestClassifier(**params)#LogisticRegression()
    model.fit(df.values[:], label.values[:])
 
    # Score on the full (non-sampled) training features.
    pred = model.predict(df_r)
    pred = pd.concat([users_r, pd.DataFrame(pred)], axis =1)
    pred = pred[pred[0] > 0]  # keep rows predicted positive
    # NOTE(review): pairs users_r with the *sampled* `label`, not label_r --
    # lengths may disagree; looks like it should be label_r. Confirm.
    answer = pd.concat([users_r, pd.DataFrame(label)], axis =1)
    answer = answer[answer['label'] > 0]
    pred = pred.drop_duplicates('user_id')

    
    score['train'] = eval.eval(pred, answer)    
    
    pred = model.predict(df_t)
    # print metrics.classification_report(label_t, pred)

    pred = pd.concat([users_t, pd.DataFrame(pred)], axis =1)
    pred = pred[pred[0] > 0]
    answer = pd.concat([users_t, pd.DataFrame(label_t)], axis =1)
    answer = answer[answer['label'] > 0]
    pred = pred.drop_duplicates('user_id')
    
    score['test'] = eval.eval(pred, answer)
    return score, model
예제 #3
0
def main(num_epochs, dataset, lr, batch_size, sheet):
    """Train the CR+SR luma network pair for *num_epochs* epochs,
    checkpointing and evaluating after every epoch.

    sheet: logging target passed through to train()/eval().
    """
    elapsed_time = 0.0
    print("===> Loading datasets")
    training_data_loader = get_dataset(batch_size, dataset, shuffle=True)
    print("===> Building model")
    print("===> Setting GPU")
    #Two nets. One for before encoding, other for after encoding.
    SR = CNNSRluma().cuda()
    CR = CNNCRluma().cuda()
    criterion = nn.MSELoss().cuda()
    print("===> Setting Optimizer")
    #Both net parameters are going to be trained, thus send them to the optimizer
    # NOTE(review): beta2=0.9 is unusually low (Adam default is 0.999) -- confirm intentional.
    optimizer = optim.Adam(list(CR.parameters()) + list(SR.parameters()),
                           lr=lr,
                           betas=(0.9, 0.9),
                           eps=(10**-8))
    print("===> Training")
    #train for num_epochs
    for epoch in range(num_epochs):
        start_time = time.time()
        train(training_data_loader, optimizer, CR, SR, criterion, epoch, sheet)
        path = save_checkpoint(CR, SR, optimizer, epoch)
        #Eval trained nets every epoch
        eval(path, sheet, epoch)
        elapsed_time += time.time() - start_time
        print("Accumulated training time (mins) = {:f}".format(elapsed_time /
                                                               60.0))
예제 #4
0
def rrt():
    """Generate an RRT path, smooth it three different ways, and plot all
    four variants against the obstacle set via eval.eval."""
    raw_path, obstacles = path_generator.generate_rrt(N=2)
    smoothed_kalman = smoothing.kalman(raw_path)
    smoothed_bezier = smoothing.bezier(raw_path)
    smoothed_bezier_piecewise = smoothing.bezier_divided(raw_path)

    eval.eval(obstacles, raw_path, 'Raw path', smoothed_kalman, 'Kalman Filter Smoothing', smoothed_bezier, 'Bézier Curve', smoothed_bezier_piecewise, 'Piecewise Bézier Curve')
예제 #5
0
def main():
    """Train multinomial LR on Yelp review training data, print train-set
    eval metrics, then write dev-set predictions to 'v7.txt' (Python 2)."""

    # Toggle for the hashing-trick feature representation.
    ifHash = False

    trainfile = 'yelp_reviews_train.json'
    X, y, top = util.preprocess(trainfile,
                                ifTrain=True,
                                ifHash=ifHash,
                                trainTop=[])

    W = multiLR.BSGD(X, y)
    t, s = multiLR.predict(W, X)
    print eval.eval(t, s, y)

    predfile = 'yelp_reviews_dev.json'
    # Reuse the top features selected during training.
    x, _, _ = util.preprocess(predfile,
                              ifTrain=False,
                              ifHash=ifHash,
                              trainTop=top)

    t, s = multiLR.predict(W, x)

    util.writePred(t, s, 'v7.txt')

    return
예제 #6
0
def main():
    """Train a SimCLR model as configured by ``config.yaml``, then run eval.

    Side effects: prints the parsed config and selected device, trains the
    model, and evaluates it on data under ``./data/``.
    """
    # Fix: close the config file deterministically (the original leaked the
    # handle returned by the bare open()).
    with open("config.yaml", "r") as cfg_file:
        # NOTE(review): FullLoader can instantiate some Python objects from
        # tags; prefer yaml.safe_load if config.yaml is not fully trusted.
        config = yaml.load(cfg_file, Loader=yaml.FullLoader)
    print(config)
    dataset = DataSetWrapper(config['batch_size'], **config['dataset'])

    simclr = SimCLR(dataset, config)
    simclr.train()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(device)
    model = simclr.model
    # Evaluate the trained encoder on data under ./data/.
    eval(model, './data/', device, config)
예제 #7
0
def test_output(en_is_src, setname, outfolder):
    """Score predictions in *outfolder* against average-BLEU labels for
    every language pair on the given test set."""
    for lan in LANGUAGES:
        if en_is_src:
            pairname = 'en-' + lan
        else:
            pairname = lan + '-en'
        tag = pairname + ".11" + setname
        print(pairname, setname)
        sys.stderr.write(tag + "\n")

        avgbleu_file = os.path.join(BLEUROOT_HK, tag)
        output_file = os.path.join(outfolder, tag)

        prds = readnums(output_file)
        labels = readnums(avgbleu_file)

        eval.eval(prds, labels)
        print("")
        print("========")
        sys.stderr.write("\n")
        sys.stderr.write("===========\n")
예제 #8
0
    def test_eval(self):
        """Smoke test: build a FafeNet from cfg.yml, save its weights in the
        checkpoint format eval() expects, and run eval() on that checkpoint."""
        save_filename = 'foo'
        config_path = 'cfg.yml'
        config = load_config(config_path)

        # NOTE(review): input_config is constructed but never used below.
        input_config = InputConfig(config['INPUT_CONFIG'])
        model = FafeNet(config)
        torch.save({'model_state_dict': model.state_dict()}, save_filename)

        model_path = save_filename
        # Hard-coded local data path -- test only runs on this machine.
        data_path = '/Users/erikbohnsack/data'
        eval(model_path=model_path, data_path=data_path)
예제 #9
0
def baseline_output(en_is_src, year, outfolder):
    """Evaluate baseline predictions in *outfolder* against the average-BLEU
    labels for every file pair of the given year."""
    filedict = getallfilepairs(en_is_src, year)
    for filekey in filedict:
        filetag = filekey + "." + year
        print(filetag)
        sys.stderr.write(filetag + "\n")

        labels_path = os.path.join(BLEUROOT_HK, filetag)
        preds_path = os.path.join(outfolder, filetag)

        prds = readnums(preds_path)
        labels = readnums(labels_path)

        eval.eval(prds, labels)
        print("")
        print("========")
        sys.stderr.write("\n")
        sys.stderr.write("===========\n")
예제 #10
0
def eval_output(year, cproot):
    """Evaluate predicted scores under *cproot* against average-BLEU labels
    for every English-source file pair of *year*."""
    for filekey in getallfilepairs(True, year):
        filetag = filekey + '.' + year
        print(filetag + '\n')
        sys.stderr.write(filetag + '\n')

        label_path = os.path.join(BLEUROOT_HK, filetag)
        pred_path = os.path.join(cproot, filetag)

        prds = readnums(pred_path)
        labels = readnums(label_path)

        eval.eval(prds, labels)
        print("")
        print("==========")
        sys.stderr.write("\n")
        sys.stderr.write("============\n")
예제 #11
0
def main():
    """Train a model, evaluate on the test split, pretty-print the metrics,
    and (when a Comet experiment is active) log metrics plus a confusion
    matrix with per-example document previews."""
    saver = Saver()
    train_data, val_data, test_data, raw_doc_list = load_data()

    print(train_data.graph.shape)
    # Wrap training in the Comet train() context when an experiment is live.
    if COMET_EXPERIMENT:
        with COMET_EXPERIMENT.train():
            saved_model, model = train(train_data, val_data, saver)
    else:
        saved_model, model = train(train_data, val_data, saver)
    with torch.no_grad():
        test_loss_model, preds_model = model(
            train_data.get_pyg_graph(device=FLAGS.device), test_data)
    eval_res = eval(preds_model, test_data, True)
    # Pull the raw label arrays out before printing the scalar metrics.
    y_true = eval_res.pop('y_true')
    y_pred = eval_res.pop('y_pred')
    print("Test...")
    pprint(eval_res)
    if COMET_EXPERIMENT:
        from comet_ml.utils import ConfusionMatrix

        def index_to_example(index):
            # Map a test-set index back to its raw document text for Comet.
            test_docs_ids = test_data.node_ids
            return raw_doc_list[test_docs_ids[index]]

        confusion_matrix = ConfusionMatrix(
            index_to_example_function=index_to_example,
            labels=list(test_data.label_dict.keys()))
        confusion_matrix.compute_matrix(y_true, y_pred)

        with COMET_EXPERIMENT.test():
            COMET_EXPERIMENT.log_metrics(eval_res)
            COMET_EXPERIMENT.log_confusion_matrix(
                matrix=confusion_matrix,
                labels=list(test_data.label_dict.keys()))
예제 #12
0
def upload_file(request):
    """Django view (Python 2): accept an uploaded image, run the saved
    TensorFlow model on it, and render 'NORMAL' or 'ABNORMAL'."""
    if request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()

            fname = str(request.FILES['file'])
            f_path = os.path.join(settings.MEDIA_ROOT, fname)
            # Resize to the fixed 300x300 RGB input the model expects.
            img = np.asarray(
                Image.open(f_path).resize([300, 300],
                                          Image.ANTIALIAS).convert('RGB'))
            h, w, ch = np.shape(img)
            img = img.reshape([1, h, w, ch])  # add batch dimension
            #img = img.reshape([1,] + list(np.shape(img)))
            model_path = './models/step_38300_acc_0.890909016132/model'
            pred_list = eval.eval(model_path, img, 1, None)
            # pred_list[0][0] is presumably P(normal) -- confirm against eval.eval.
            if pred_list[0][0] > 0.5:
                value = 'NORMAL'
            else:
                value = 'ABNORMAL'

            print 'form is save'
            return render(request, 'show_acc.html', {'value': value})
    else:
        form = UploadForm()
    return render(request, 'upload.html', {'form': form})
예제 #13
0
    def validation_epoch_end(self, outputs):
        """Lightning hook: aggregate per-batch validation metrics, log them,
        and render eval GIFs from the latest 'epoch' checkpoint if any."""
        mean_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        mean_psnr = torch.stack([x['val_psnr'] for x in outputs]).mean()

        log = {'val/loss': mean_loss, 'val/psnr': mean_psnr}
        self.log("val/loss", mean_loss)
        wandb.log(log)

        # eval() reads its settings from hparams; reuse the experiment name.
        self.hparams.scene_name = self.hparams.exp_name
        self.hparams.N_importance = 64

        ckpt_dir = os.path.join(self.hparams.log_dir, self.hparams.exp_name,
                                "ckpts")
        ckpts = [f for f in os.listdir(ckpt_dir) if "epoch" in f]
        if len(ckpts) != 0:
            # Lexicographic sort picks the "largest" filename as the latest.
            # NOTE(review): breaks past epoch 9 unless epochs are zero-padded.
            ckpts.sort()

            self.hparams.eval_ckpt_path = os.path.join(ckpt_dir, ckpts[-1])
            img_gif, depth_gif = eval(self.hparams)

            wandb.log({
                "val/depth_gif":
                wandb.Video(depth_gif, fps=30, format="gif")
            })
            # else:
            wandb.log(
                {"val/out_gif": wandb.Video(img_gif, fps=30, format="gif")})

        return {
            'progress_bar': {
                'val_loss': mean_loss,
                'val_psnr': mean_psnr
            },
        }
예제 #14
0
def perlin_random(max_norm):
    """Random search over Perlin-noise perturbations of the Cityscapes val
    split, tracking the lowest IoU achieved within the norm budget.

    max_norm: perturbation norm budget passed to get_noise_f/perturb.
    Returns the last perturbed image payload (numpy array).
    """
    images_folder = '/vol/gpudata/sd4215/segmentation/swiftnet/datasets/Cityscapes/img/left/leftImg8bit/'
    best_iou = 100  # "best" for the attacker = lowest IoU seen so far

    for i in range(25):
        # Sample random noise parameters for this trial.
        noise_func, bounds = get_noise_f(1024, 2048, 3, max_norm)
        freq = random.uniform(1 / 160, 1 / 20)
        freq_sin = random.uniform(4, 32)
        octave = random.randint(1, 4)
        curr_noise = noise_func((freq, freq_sin, octave))
        # Restore a clean copy of the val set before perturbing it in place.
        if os.path.isdir(images_folder + 'val/'):
            shutil.rmtree(images_folder + 'val/')
        shutil.copytree(images_folder + 'bo-temp/val/', images_folder + 'val/')
        for img_dir in ['frankfurt', 'lindau', 'munster']:
            for img_name in os.listdir(images_folder + 'val/' + img_dir):
                img_path = images_folder + 'val/' + img_dir + '/' + img_name
                # Fix: np.float was removed in NumPy 1.24; np.float64 is the
                # dtype the old alias resolved to.
                orig_img = io.imread(img_path).astype(np.float64)
                payload = perturb(orig_img, max_norm, curr_noise)
                io.imsave(fname=img_path, arr=payload.astype(np.uint8))
        iou = eval('configs/pyramid.py')

        if iou < best_iou:
            best_iou = iou
        print(best_iou)

    shutil.copytree(
        '/vol/gpudata/sd4215/segmentation/swiftnet/configs/out/val',
        '/vol/gpudata/sd4215/segmentation/swiftnet/results/random/' +
        str(max_norm))
    return payload
예제 #15
0
파일: run.py 프로젝트: GDPlumb/ExpO
def run_fn(args):
    """Worker: train/evaluate one (trial, reg, stddev_eval, stddev_reg)
    configuration in its own output directory and dump results to out.json.

    args: [trial, reg, stddev_eval, stddev_reg].
    Relies on module-level DATASET_PATH, dataset, size, depth and rate.
    """

    # Re-seed so parallel workers don't share RNG state.
    np.random.seed()

    trial = args[0]
    reg = args[1]
    stddev_eval = args[2]
    stddev_reg = args[3]

    name = "TF/" + str(stddev_eval) + "/" + str(stddev_reg) + "/" + str(
        reg) + "/trial" + str(trial) + "/"

    cwd = os.getcwd()

    # Run inside a per-configuration directory; restored at the end.
    os.makedirs(name)
    os.chdir(name)

    manager = "regression"
    source = DATASET_PATH + dataset + ".csv"
    shape = [size] * depth
    out = eval(manager,
               source,
               hidden_layer_sizes=shape,
               learning_rate=rate,
               regularizer="Causal",
               c=reg,
               stddev_reg=stddev_reg,
               stop_on_loss=True,
               evaluate_explanation=True,
               stddev_eval=stddev_eval)

    with open("out.json", "w") as f:
        json.dump(out, f)

    os.chdir(cwd)
예제 #16
0
def search():
    """Web endpoint: run the trading model on the searched stock symbol and
    return JSON series for plotting prices with buy/sell markers.

    Requires a logged-in session; expires the user's subscription when its
    date has passed before querying the model.
    """
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    stock = request.form.get("search_query")
    user = helpers.get_user()
    sub = user.subscription
    sub_data = user.sub_date
    # Expire the subscription once its date has passed.
    if sub:
        if datetime.datetime.now() > sub_data:
            helpers.change_user(subscription=False)
            sub = False
    df = eval(stock, sub)
    prices = df['actual'].values.tolist()
    date = []
    for i in range(len(df)):
        date.append(str(df.at[i, 'Date'])[0:10])  # keep the YYYY-MM-DD prefix
    # date = df['Date'].apply(str).values.tolist()
    # Blank out HOLD rows, then split into buy-only / sell-only series so the
    # client can plot them as separate marker sets.
    df.loc[df['action'] == 'HOLD', 'actual'] = None
    df_buy = df.copy()
    df_sell = df.copy()
    df_buy.loc[df_buy['action'] == 'SELL', 'actual'] = None
    prices_buy = df_buy['actual'].values.tolist()
    df_sell.loc[df_sell['action'] == 'BUY', 'actual'] = None
    prices_sell = df_sell['actual'].values.tolist()
    # ignore_nan implies simplejson-style dumps (NaN -> null) -- confirm import.
    return json.dumps(
        {
            'Date': date,
            'Prices': prices,
            'Buy_Prices': prices_buy,
            'Sell_Prices': prices_sell,
            'Subscription': sub,
            'Return-fAI': "---",
            'Return-holding': "---"
        },
        ignore_nan=True)
예제 #17
0
파일: window.py 프로젝트: Oroth/leditor
def cmdRunEditorObj(window):
    """Evaluate the sexp under the current editor's cursor and open the
    result in a new Lisp object editor attached to *window*."""
    editor = window.editor
    root = editor.buffer.root
    cursor_path = editor.buffer.rootToCursorAdd()
    result = eval.eval(buffer.BufferSexp(root, cursor_path))
    return window.addEditor(lispObjEditor.LispObjEditor(result))
예제 #18
0
def run_fn(args, evaluate_explanation = True):
    """Worker: train/evaluate one MSD configuration in its own directory
    and dump the results to out.json.

    args: [dataset, trial, depth, size, rate] -- dataset is only used for
    naming; the data source is the module-level DATASET_PATH.
    evaluate_explanation: forwarded to eval().
    """

    # Re-seed so parallel workers don't share RNG state.
    np.random.seed()

    dataset = args[0]
    trial = args[1]
    depth = args[2]
    size = args[3]
    rate = args[4]

    name = args2name(dataset, trial, depth, size, rate)

    cwd = os.getcwd()

    # Run inside a per-configuration directory; restored at the end.
    os.makedirs(name)
    os.chdir(name)

    manager = "msd"
    source =  DATASET_PATH
    shape = [size] * depth
    out = eval(manager, source,
               hidden_layer_sizes = shape,
               learning_rate = rate,
               evaluate_explanation = evaluate_explanation,
               stop_on_loss = True,
               min_epochs = 10, stopping_epochs = 10)

    with open("out.json", "w") as f:
        json.dump(out, f)

    os.chdir(cwd)
예제 #19
0
def train(model: MemN2N, train_data, valid_data, config):
    """
    Train the memory network, validating after every epoch.

    The learning rate is annealed (divided by 1.5) whenever validation loss
    stops improving; checkpoints are written every config.log_epoch epochs,
    a "model_good" checkpoint is saved once the target perplexity is
    reached, and training stops early when lr drops below 1e-5.

    Args:
        model (MemN2N): the model to train
        train_data: training data
        valid_data: validation data
        config: model and training configs

    Returns:
        no return
    """
    lr = config.init_lr

    train_losses = []
    train_perplexities = []

    valid_losses = []
    valid_perplexities = []

    for epoch in range(1, config.nepoch + 1):
        train_loss = train_single_epoch(model, lr, train_data, config)
        valid_loss = eval(model, valid_data, config, "Validation")

        info = {'epoch': epoch, 'learning_rate': lr}

        # When the loss on the valid set no longer drops, divide lr by 1.5
        # (0.9999 factor tolerates tiny noise-level improvements).
        if len(valid_losses) > 0 and valid_loss > valid_losses[-1] * 0.9999:
            lr /= 1.5

        train_losses.append(train_loss)
        train_perplexities.append(math.exp(train_loss))

        valid_losses.append(valid_loss)
        valid_perplexities.append(math.exp(valid_loss))

        info["train_perplexity"] = train_perplexities[-1]
        info["validate_perplexity"] = valid_perplexities[-1]

        print(info)

        # Periodic checkpoint of weights plus the current learning rate.
        if epoch % config.log_epoch == 0:
            save_dir = os.path.join(config.checkpoint_dir, "model_%d" % epoch)
            paddle.save(model.state_dict(), save_dir)
            lr_path = os.path.join(config.checkpoint_dir, "lr_%d" % epoch)
            with open(lr_path, "w") as f:
                f.write(f"{lr}")

        # Stop once the target validation perplexity is reached.
        if info["validate_perplexity"] < config.target_ppl:
            save_dir = os.path.join(config.checkpoint_dir, "model_good")
            paddle.save(model.state_dict(), save_dir)
            break

        if lr < 1e-5:
            break

    save_dir = os.path.join(config.checkpoint_dir, "model")
    paddle.save(model.state_dict(), save_dir)
예제 #20
0
파일: run.py 프로젝트: GDPlumb/ExpO
def run_fn(args, evaluate_explanation=True):
    """Worker: train/evaluate one regression configuration (Causal1D
    regularizer) in its own directory and dump results to out.json.

    args: [dataset, trial, depth, size, rate, reg].
    evaluate_explanation: forwarded to eval().
    """

    # Re-seed so parallel workers don't share RNG state.
    np.random.seed()

    dataset = args[0]
    trial = args[1]
    depth = args[2]
    size = args[3]
    rate = args[4]
    reg = args[5]

    name = args2name(dataset, trial, depth, size, rate, reg)

    cwd = os.getcwd()

    # Run inside a per-configuration directory; restored at the end.
    os.makedirs(name)
    os.chdir(name)

    manager = "regression"
    source = DATASET_PATH + dataset + ".csv"
    shape = [size] * depth
    out = eval(manager,
               source,
               hidden_layer_sizes=shape,
               learning_rate=rate,
               regularizer="Causal1D",
               c=reg,
               stddev_reg=0.5,
               evaluate_explanation=evaluate_explanation,
               stop_on_loss=True)

    with open("out.json", "w") as f:
        json.dump(out, f)

    os.chdir(cwd)
예제 #21
0
파일: run.py 프로젝트: GDPlumb/ExpO
def run_fn(trial, type, evaluate_explanation = True):
    """Worker: train/evaluate one MSD run with regularizer *type* using
    fixed hyper-parameters, dump results to out.json, and return the
    stopping epoch.

    NOTE(review): the evaluate_explanation parameter is never used -- the
    eval() call below hard-codes evaluate_explanation=False; confirm intent.
    NOTE(review): `type` shadows the builtin.
    """

    # Re-seed so parallel workers don't share RNG state.
    np.random.seed()

    dataset = "msd"
    depth = 5
    size = 100
    rate = 0.001
    reg = 0.1

    name = args2name(dataset, trial, depth, size, rate, reg)

    cwd = os.getcwd()

    # Run inside a per-configuration directory; restored at the end.
    os.makedirs(name)
    os.chdir(name)

    manager = "msd"
    source =  DATASET_PATH
    shape = [size] * depth
    out, epoch = eval(manager, source,
           hidden_layer_sizes = shape,
           learning_rate = rate,
           regularizer = type, c = reg, stddev_reg = 0.5,
           evaluate_explanation = False,
           stop_on_loss = True,
           min_epochs = 10, stopping_epochs = 10)

    with open("out.json", "w") as f:
        json.dump(out, f)

    os.chdir(cwd)

    return epoch
예제 #22
0
 def _do_python_eval(self, output_dir = 'output'):
     """Run VOC-style detection evaluation for every class: compute PR/AP at
     IoU 0.5, pickle each class's PR data into *output_dir*, save PR plots,
     and print per-class APs plus the mean AP."""
     annotations = self._load_annotations()
     imagenames = self.image_index
     cachedir = os.path.join(self._dataset_path, 'annotations_cache')
     aps = []
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_results_file_template().format(cls)
         # ovthresh=0.5 is the standard VOC IoU threshold.
         rec, prec, ap, sorted_scores = eval(
             filename, annotations, imagenames, cls, cachedir, ovthresh=0.5)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         # NOTE(review): text-mode 'w' with cPickle implies Python 2;
         # binary mode 'wb' would be required on Python 3.
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
             cPickle.dump({'rec': rec, 'prec': prec,
                           'ap': ap, 'scores': sorted_scores}, f)
         self._save_plots(os.path.join(output_dir, cls + '.png'),
                          cls, rec, prec, sorted_scores)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
예제 #23
0
def main():
    """Train the dual-CNN denoiser: build the noisy-patch dataset, run
    opt.nEpochs of training, and evaluate each epoch's checkpoint."""
    global opt, model
    opt = parser.parse_args()
    logger = set_logger(opt.save)
    print(opt)
    print(opt, file=logger)

    # setting gpu and seed
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)

    # setting dataset: noisy patches synthesised at noise level opt.sigma
    print("===> Loading dataset")
    patches = datagenerator(data_dir=opt.data_train)
    train_set = DenoisingDataset(patches, sigma=opt.sigma)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      drop_last=True,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    # setting model and loss
    print("===> Building model")
    model = DUAL_CNN_DENOISE()
    # NOTE(review): size_average is deprecated in modern torch (use reduction='sum').
    criterion = nn.MSELoss(size_average=False)
    model = model.cuda()
    criterion = criterion.cuda()

    # setting optimizer: separate learning rates per sub-network
    print("===> Setting Optimizer")
    kwargs = {'weight_decay': opt.weight_decay}
    optimizer = optim.Adam([{
        "params": model.structure_net.parameters(),
        "lr": opt.srcnn_lr
    }, {
        "params": model.detail_net.parameters(),
        "lr": opt.vdsr_lr
    }], **kwargs)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch, logger)
        model_path = save_checkpoint(model, epoch)
        # Evaluate every epoch's checkpoint at the training noise level.
        eval.eval(model_path, opt.save, opt.sigma)
예제 #24
0
def Main(out, read, Reg_no):
    """Compile the expression lines in *read*: evaluate each into
    intermediate code (eval.eval writes inout.txt), parse that into
    quadruples, compute variable usage, and emit the final table.

    out:    output target forwarded to table.get.
    read:   iterable of source expression lines.
    Reg_no: presumably the number of registers available -- confirm in table.get.
    """
    # Truncate the scratch symbol-table files.
    f = open("symbol_temp.txt", "w")
    f.close()
    f = open("symbol_original.txt", "w")
    f.close()
    tostr = []
    # f = open('input.txt', 'r')
    # read = f.readlines()
    k = 0
    for input_exp in read:
        # print '\n\n'+input_exp
        input_exp = str(input_exp)
        # print k
        # eval.eval threads the counter k through successive expressions.
        k = eval.eval(input_exp, k, tostr)
        # input_exp = raw_input('Enter the Expression : ')
        # eval.eval(input_exp)

    f.close()
    d = []

    # Read back the intermediate code emitted by eval.eval.
    f1 = open("inout.txt", "r")

    lines = f1.readlines()
    f1.close()
    os.system("rm inout.txt")
    for i in lines:
        i = i.split("\n")
        d.append(i[0])

    quad = []

    # Tokenize each line into identifiers/numbers and +, -, * operators.
    j = 0
    for i in d:
        quad.append(re.findall("\w+|\+|-|\*", d[j]))
        j = j + 1

    t = usage.gen(quad)
    # print t

    f = 0
    # print t
    # 	print '\n\n\n'

    # 	for i , j in t.iteritems():
    # 		for k in j:
    # 			print k[0],'\t',
    # 			print 'lu'+str(k[1]),'\t',
    # 			if k[2] == None:
    # 				print 'nnu\t',
    # 			else:
    # 				print 'nu'+str(k[2])+'\t',
    # 			print
    # 		print
    # 	print

    # 	print quad

    table.get(quad, t, out, Reg_no, tostr)
    os.system("rm symbol_temp.txt")
예제 #25
0
def Main(out, read,Reg_no):
	"""Tab-indented Python 2 variant of Main: compile the expression lines
	in *read* into quadruples via eval.eval, compute variable usage, and
	emit the final table via table.get.

	NOTE(review): indentation mixes tabs with an 8-space line below --
	valid on Python 2 (tab = 8 columns) but a TabError on Python 3.
	"""
	f = open("symbol_temp.txt", "w")
	f.close()
        f = open("symbol_original.txt", "w")	
	f.close()
	tostr = []
	#f = open('input.txt', 'r')
	#read = f.readlines()
	k = 0
	for input_exp in read:
		#print '\n\n'+input_exp
		input_exp = str(input_exp)
		#print k
		k = eval.eval(input_exp, k, tostr)
	#input_exp = raw_input('Enter the Expression : ')
	#eval.eval(input_exp)
	
	f.close()
	d = []
	
	f1 = open('inout.txt' , 'r')
	
	lines = f1.readlines()
	f1.close()
	os.system('rm inout.txt')
	for i in lines:
		i = i.split('\n')
		d.append(i[0])

	quad = []

	j = 0
	for i in d:
		quad.append(re.findall('\w+|\+|-|\*', d[j]))
		j = j + 1

	t = usage.gen(quad)
	#print t

	f = 0
	#print t
#	print '\n\n\n'

#	for i , j in t.iteritems():
#		for k in j:
#			print k[0],'\t',
#			print 'lu'+str(k[1]),'\t',
#			if k[2] == None:
#				print 'nnu\t',
#			else:
#				print 'nu'+str(k[2])+'\t',
#			print
#		print
#	print

#	print quad	

	table.get(quad , t,out, Reg_no, tostr)
	os.system("rm symbol_temp.txt")
예제 #26
0
파일: repl.py 프로젝트: WaldonChen/vivid
def repl(prompt='vivid> '):
    """Read-eval-print loop for the vivid interpreter (Python 2).

    Parses and evaluates one line per prompt with the interpreter's own
    parse/eval, printing the repr of non-None results.  VividError is
    printed and the loop continues; any other exception propagates.
    """
    while True:
        try:
            val = eval(parse(raw_input(prompt)))
            if val is not None:
                print repr(val)
        except VividError, e:
            print e
예제 #27
0
def test_output(en_is_src, setname):
    """Score the default system outputs against average-BLEU labels for
    every language pair on the given test set."""
    for lan in LANGUAGES:
        if en_is_src:
            pairname = 'en-' + lan
        else:
            pairname = lan + '-en'
        tag = pairname + '.11' + setname
        print(tag + '\n')
        sys.stderr.write(tag + '\n')

        labels_path = os.path.join(BLEUROOT_HK, tag)
        preds_path = os.path.join(DEFAULTROOT, tag)

        prds = readnums(preds_path)
        labels = readnums(labels_path)

        eval.eval(prds, labels)
        print("")
        print("========")
        sys.stderr.write("\n")
        sys.stderr.write("===========\n")
예제 #28
0
def repl(prompt='vivid> '):
    """Interactive REPL (Python 2): parse and evaluate one line at a time,
    printing non-None results; VividError is reported and the loop
    continues, any other exception propagates."""
    while True:
        try:
            val = eval(parse(raw_input(prompt)))
            if val is not None:
                print repr(val)
        except VividError, e:
            print e
예제 #29
0
def run():
    """Per-language pipeline (Python 2): load train/test data, write
    liblinear feature files, train an SVR, and eval on test and dev sets."""
    model = Model()
    for tgtlan in LANGUAGES:
        print "==="+tgtlan+"==="
        # model = Model()
        model.loadTrain(tgtlan)
        model.loadTest(tgtlan)
        # Feature ids are loaded once and reused across languages.
        if len(model.featureids) == 0: model.loadFeatures()
        # eval.eval(linear_regression(model.featurestrain.values(), model.train_labels, model.featurestest.values()))
        model.writeFeatures()
        #print('aha'); sys.exit()
        print "= eval test, combined"
        prds = liblinear_train_perdict_svr("temp.train","temp.test",True)
        eval.eval(prds, model.test_labels)
        print "= eval dev, combined"
        prds = liblinear_train_perdict_svr("temp.train","temp.dev",True)
        eval.eval(prds, model.dev_labels)
예제 #30
0
def default_output(en_is_src, year):
    """Score the default system outputs against average-BLEU labels for
    every file pair of the given year."""
    for filekey in getallfilepairs(en_is_src, year):
        filetag = filekey + "." + year
        print(filetag + '\n')
        sys.stderr.write(filetag + '\n')

        labels_path = os.path.join(BLEUROOT_HK, filetag)
        preds_path = os.path.join(DEFAULTROOT, filetag)

        prds = readnums(preds_path)
        labels = readnums(labels_path)

        eval.eval(prds, labels)
        print("")
        print("========")
        sys.stderr.write("\n")
        sys.stderr.write("===========\n")
예제 #31
0
def eval_output(year, cproot):
    """Score predicted values under *cproot* against average-BLEU labels for
    every English-source file pair of *year*."""
    pairs = getallfilepairs(True, year)
    for filekey in pairs:
        filetag = filekey + '.' + year
        print(filetag + '\n')
        sys.stderr.write(filetag + '\n')

        labels_path = os.path.join(BLEUROOT_HK, filetag)
        preds_path = os.path.join(cproot, filetag)

        prds = readnums(preds_path)
        labels = readnums(labels_path)

        eval.eval(prds, labels)
        print("")
        print("==========")
        sys.stderr.write("\n")
        sys.stderr.write("============\n")
예제 #32
0
def run_eval():
    """Grade the student answer file against the answer key.

    Opens both module-level files and delegates scoring to eval.eval();
    on failure writes a diagnostic to stderr naming the file that could
    not be opened.
    """
    try:
        with open(answer_file_name, "r") as student_ans:
            with open(answer_key_name, "r") as answer_key:
                grades = eval.eval(answer_key, student_ans)
    except IOError as e:
        # Fix: the old message always blamed the answer key even when the
        # student file failed, and e.message does not exist on Python 3 --
        # use e.filename (set by open()) and str(e) instead.
        failed = e.filename if e.filename else answer_key_name
        sys.stderr.write(
            "Couldn't open file \'" + str(failed) + "\'\n")
        sys.stderr.write(str(e) + "\n")
예제 #33
0
def run(do_train, do_eval, do_predict, ckpt, get_rouge, max_epochs=100):
    """Build the pointer-generator summarizer and run the requested actions:
    resume from *ckpt* and evaluate/predict, compute ROUGE, and/or train."""
    train_set = Articles(test=False)
    test_set = Articles(test=True)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=False, num_workers=1)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=1)

    encoder = Encoder()
    attention_decoder = AttnDecoder()
    model = PointerGenerator(encoder, attention_decoder)
    model.to(device)
    optimizer = torch.optim.Adagrad(model.parameters(), lr=lr)
    loss_function = torch.nn.NLLLoss()

    if ckpt:
        # Resume: epoch is restored from the checkpoint.
        model, optimizer, epoch = load_ckp(checkpoint_path=ckpt, model=model, optimizer=optimizer)
        if do_eval:
            eval(test_loader, model, loss_function)
        elif do_predict:
            vocab = Vocab('data/vocab', voc_size)
            # NOTE(review): .next() is Python 2 style; next(iter(...)) on Py3.
            batch = iter(train_loader).next()
            story, highlight = batch
            batcher = Batcher(story, highlight, vocab)
            stories, highlights, extra_zeros, story_extended, highlight_extended, vocab_extended = batcher.get_batch(
                get_vocab_extended=True)

            stories = stories.to(device)
            highlights = highlights.to(device)
            story_extended = story_extended.to(device)
            extra_zeros = extra_zeros.to(device)

            # stories, highlights = get_random_sentences(test_set, batch_size)
            with torch.no_grad():
                output = model(stories, highlights, story_extended, extra_zeros)

            get_batch_prediction(stories, output, highlights)
    if get_rouge:
        get_rouge_files(model, test_loader)
        get_rouge_score()

    # NOTE(review): this `else` pairs with `if get_rouge`, so when ckpt is
    # falsy and get_rouge is truthy, `epoch` is never bound and train()
    # below raises NameError -- the else was probably meant for `if ckpt`.
    else:
        epoch = 0

    if do_train:
        train(train_loader, test_loader, loss_function, model, optimizer, epoch, num_epochs=max_epochs - epoch)
예제 #34
0
def test(threshold = 0.5):
    """Score the module-level xgboost model `bst` on the test frame `df_t`.

    threshold: minimum predicted score for a row to count as positive.
    Returns eval.eval(...) over deduplicated predicted-positive users vs
    the true positive users.
    """
    dtest = xgb.DMatrix(df_t)
    y = bst.predict(dtest)
    pred = pd.concat([users_t,pd.DataFrame(y)],axis=1,ignore_index=False)
    pr = pred[pred[0]> threshold]
    del pr[0]  # drop the raw score column, keep user ids
    yture = pd.concat([users_t, pd.DataFrame(label_t)], axis =1)
    yture = yture[yture['label']>0]
    pr = pr.drop_duplicates('user_id')
    return eval.eval(pr,yture, True)
예제 #35
0
def repl(env=global_env):
    """Interactive lispy read-eval-print loop; type "quit" to exit.

    Evaluation errors are printed as tracebacks and the loop continues.
    """
    while True:
        source = input("lispy> ")
        if source == "quit":
            return
        try:
            value = eval(parse(source), env=env)
            print("=> %s" % print_expression(value))
        except Exception:
            traceback.print_exc()
예제 #36
0
def predict_file():
    """Pick one pending slide from the shared `info` dict, run eval.eval on
    it to produce a result image plus HTML page, and record the result.

    Returns True if a slide was processed, False when there is nothing to
    do or evaluation failed.  `info`, `lock` and `last_update_time` are
    shared with other threads; all mutations happen under `lock`.
    """
    global info, lock, last_update_time
    slide_file = None
    lock.acquire()
    # Find the first present slide without a finished (tuple/list) result.
    for file in info:
        if not info[file]['presents']: continue
        if info[file]['result'] is None or not isinstance(
                info[file]['result'], (tuple, list)):
            slide_file = file
            break
    lock.release()
    if not slide_file: return False

    def update_progress(progress):
        # Store an "NN%" string as the in-progress result; a negative value
        # clears it (used below to flag failure).
        global info, lock, last_update_time
        lock.acquire()
        progress_text = None
        if progress >= 0:
            progress_text = '%d%%' % int(progress)
        if info[slide_file]['result'] != progress_text:
            info[slide_file]['result'] = progress_text
            last_update_time = utils.cur_time_str()
            # utils.save_json(info_path, info)
            # print('Updated: ' + last_update_time)
        lock.release()

    import eval
    print('Evaluating %s' % slide_file)
    result_file = os.path.join(info_folder, slide_file + '-result.png')
    try:
        width, height = eval.eval(os.path.join(data_folder, slide_file),
                                  os.path.join('client', result_file),
                                  update_progress)
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt; consider `except Exception` plus logging.
        update_progress(-1)
        return False
    html_path = os.path.join(info_folder, slide_file + '-result.html')

    def create_html(width, height, thumbnail_path, result_file, html_path):
        # Side-by-side thumbnail and result images with relative src paths.
        html = '<img  width=%d height=%d src=%s>' % (
            width, height, thumbnail_path.split('/')[-1])
        html += '<img  width=%d height=%d src=%s>' % (
            width, height, result_file.split('/')[-1])
        utils.save_file(os.path.join('client', html_path), html)

    create_html(width, height, info[slide_file]['thumbnail'], result_file,
                html_path)

    lock.acquire()
    # Publish the finished result and persist the info index.
    info[slide_file]['result'] = (html_path, result_file)
    last_update_time = utils.cur_time_str()
    utils.save_json(info_path, info)
    print('Updated: ' + last_update_time)
    lock.release()
    return True
예제 #37
0
def call():
	eval.eval()
	d=[]
        f1 = open ( 'inout.txt', 'r')
        lines = f1.readlines()
        for i in lines:
                i = i.split('\n')
                d.append(i[0])

	quad = []
        i = 0
        while(i<len(d)):
                quad.append(re.findall('\w+|\+|-|\*', d[i]))
                i=i+1

	t=[]
	usage.gen(quad, t) # WORKS FINE!!!
	#print output as (var, lux, nuy)
	for i in t:
		print "("+str(i[0])+","+str(i[1][0])+","+str(i[1][1])+")"
예제 #38
0
def main():
    """Fine-tune the flow model, checkpoint per epoch, and log metrics.

    Relies on module-level globals: dataset, batch_size, args, logger,
    model, optimizer, scheduler, TestImgLoader, and the train()/eval()
    helpers.  Side effects: writes a .tar checkpoint and scalar summaries
    every epoch.
    """
    TrainImgLoader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=batch_size,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 drop_last=True,
                                                 pin_memory=True)
    log = logger.Logger(args.savemodel, name=args.logname)

    start_full_time = time.time()
    # When resuming, recover the epoch number embedded in the checkpoint
    # filename (raw string avoids the invalid-escape DeprecationWarning).
    start_epoch = 1 if args.resume is None else int(
        re.findall(r'(\d+)', args.resume)[0]) + 1
    total_iters = 0
    batches_per_epoch = len(TrainImgLoader)
    for epoch in range(start_epoch, args.epochs + 1):
        total_train_loss = 0
        total_train_rmse = 0
        # training loop
        for batch_idx, (imgL_crop, imgR_crop,
                        flowl0) in enumerate(TrainImgLoader):
            start_time = time.time()
            loss, vis = train(imgL_crop, imgR_crop, flowl0)
            if (total_iters + 1) % 20 == 0:
                print(
                    'Epoch %d Iter %d/%d training loss = %.3f , RMSE = %.3f , time = %.2f'
                    % (epoch, batch_idx, batches_per_epoch, loss,
                       vis['RMSE'], time.time() - start_time))
            total_train_loss += loss
            total_train_rmse += vis['RMSE']
            total_iters += 1
        savefilename = args.savemodel + '/' + args.logname + '/finetune_' + str(
            epoch) + '.tar'
        # Strip flow_reg (except its conv1 weights) and grid buffers from
        # the checkpoint before saving.
        save_dict = model.state_dict()
        save_dict = collections.OrderedDict({
            k: v
            for k, v in save_dict.items()
            if ('flow_reg' not in k or 'conv1' in k) and ('grid' not in k)
        })
        torch.save(
            {
                'epoch': epoch,
                'state_dict': save_dict,
                'train_loss': total_train_loss / batches_per_epoch,
            }, savefilename)
        log.scalar_summary('train/loss',
                           total_train_loss / batches_per_epoch, epoch)
        log.scalar_summary('train/RMSE',
                           total_train_rmse / batches_per_epoch, epoch)
        log.scalar_summary('test/RMSE', eval(model, TestImgLoader), epoch)
        log.scalar_summary('train/learning rate',
                           optimizer.param_groups[0]['lr'], epoch)
        scheduler.step(total_train_loss / batches_per_epoch)

    print('full finetune time = %.2f HR' %
          ((time.time() - start_full_time) / 3600))
예제 #39
0
파일: repl.py 프로젝트: JanFan/AScheme
def repl(prompt='lispy> ', inport=InPort(sys.stdin), out=sys.stdout):
    "A prompt-read-eval-print loop."
    sys.stderr.write("Lispy version 2.0\n")
    while True:
        try:
            if prompt: sys.stderr.write(prompt)
            x = parse(inport)
            if x is eof_object: return
            val = eval(x)
            if val is not None and out: print >> out, to_string(val)
        except Exception as e:
            print '%s: %s' % (type(e).__name__, e)
예제 #40
0
파일: main.py 프로젝트: aelaguiz/icfp2012
def main():
    args = parseArgs()

    map = Map()

    for line in sys.stdin.readlines():
        line = line.rstrip('\n')
        map.addLine(line)
    map.init()

    print "Loaded", map.width, "x", map.height
    print map

    if args.regress:
        print "Regressing..."
        regress(map)
    elif args.aggress:
        print "Aggressing..."
        aggress(map)
    elif args.svm:
        svm(map)
    elif args.eval:
        eval(args.eval, map)
예제 #41
0
파일: scheme.py 프로젝트: jorgy/SchemePy
def run_interpreter():
	while True:
		try:
			expr = raw_input("STk> ").lower()
		except EOFError:
			print "Bye!"
			exit(0)

		tokens = tokenize(expr)
		evaluated = eval.eval(tokens)
		if evaluated:
			print evaluated
		else:
			print tokens
예제 #42
0
def check_gold(url, target, resp):
    """Return either *target* or a canned positive response for *url*."""
    pos_res = '(see the reason below)'

    # Known test sentence and no prior response: score the candidate
    # against the gold sentence and decide deterministically.
    if resp is None and url in test_sents:
        score, _ = eval.eval(target, test_sents[url], scorer, eval_tokenizer)
        app.logger.info(score)
        return pos_res if score > .2 else target

    # for the sake of non distinguisable test and normal, return randomly
    return target if random.random() < .5 else pos_res
예제 #43
0
    def evalCmdBarResult(self, cmdBuffer):
        # Maybe should get done in the actual cmdbar
        cmd = cmdBuffer.toPyExp()
        print cmd

        if cmd and cmd[0] in ('q', 'quit'):
            return 'QUIT-WM'

        result = eval.eval(buffer.BufferSexp(cmdBuffer.root), self.getCmdBarEnv())
        print result

        if isinstance(result, WindowManager):
            return result.update('cmdBar', None)

        self.message = reader.to_string(result)

        return self.updateList(
            ('cmdBar', None))
예제 #44
0
def REPL():
    """Interactive read-eval-print loop with multi-line input support."""
    env = makeglobalenv()

    while True:
        try:
            text = input('* ')
            # Keep prompting for continuation lines until the parser sees a
            # complete s-expression.
            while True:
                try:
                    sexp = parse(text)
                except SyntaxError as err:
                    if err.msg != 'Unexpected end of token stream':
                        raise
                    text += ' ' + input('  ')
                else:
                    break
            print(tostring(eval(sexp, env)))
        except (KeyboardInterrupt, EOFError):
            print("Exiting... Bye!")
            return
        except Exception as err:
            print(str(err))
예제 #45
0
파일: CodeEditor.py 프로젝트: Oroth/leditor
 def evalBuffer(self):
     # Evaluate the entire buffer tree rooted at self.buffer.root in this
     # editor's environment, reporting each node's value through the
     # self.storeNodeValue callback.
     eval.eval(buffer.BufferSexp(self.buffer.root), self.env, self.storeNodeValue)
예제 #46
0
 def singleFeatureEval(self, key):
     """Print evaluation scores for the single feature *key* on the test
     and dev sets (features and labels are read from instance state)."""
     print "=eval, test, "+key
     print eval.eval(self.featurestest[key], self.test_labels)
     print "=eval, dev, "+key
     print eval.eval(self.featuresdev[key], self.dev_labels)
예제 #47
0
#!/opt/local/bin/python3.3

import sys

import eval
import parser

if __name__ == '__main__':
	# Evaluate the program given as the first CLI argument.  Uses a
	# context manager so the source file is closed (the original leaked
	# the handle), and avoids shadowing the builtin 'file'.
	source_path = sys.argv[1]
	with open(source_path) as handle:
		content = handle.read()
	program = parser.parse(content)
	eval.eval(program)
예제 #48
0
파일: lang.py 프로젝트: cocaman/pythonect
    def __call__(self, globals_, locals_):
        """Evaluate the stored expression in the given global/local scopes
        and return its result.

        NOTE(review): 'eval' is imported lazily inside the call -- possibly
        to avoid a circular import at module load time; confirm before
        hoisting it to the top of the file.
        """
        import eval

        return eval.eval(self.__expression, globals_, locals_)
예제 #49
0
파일: repl.py 프로젝트: frawgie/connive
def evaluate_file(source):
    fd = open(source, 'r')
    program = fd.read()
    tokenized = parse(program)
    print eval(tokenized)
예제 #50
0
파일: lispy.py 프로젝트: hhuang97/lispy
import sys

if len(sys.argv) == 1:
    readline.parse_and_bind("set editing-mode emacs")

    # Interactive REPL: read a line, parse, evaluate once, print non-None
    # results.
    lispy = lispy_parser()
    while True:
        try:
            line = input('>>> ')
        except KeyboardInterrupt:
            print('\n')
            break
        if line != '':
            ast = lispy.parse(line)
            if ast:
                # Evaluate exactly once.  The original evaluated the same
                # AST a second time inside print(), repeating any side
                # effects of the expression.
                e = eval(global_scope, ast)
                if e is not None:
                    print(e)

else:
    lispy = lispy_parser()

    # Batch mode: wrap the whole file in parens so it parses as one list
    # of forms, then evaluate each form in order.
    file_name = sys.argv[1]
    with open(file_name, 'r') as f:
        code = f.read()
    code = '(' + code + ')'
    code = lispy.parse(code)

    for sexp in code['value']:
        eval(global_scope, sexp)
예제 #51
0
파일: expand.py 프로젝트: JanFan/AScheme
def expand(x, toplevel=False):
    "Walk tree of x, making optimizations/fixes, and signaling SyntaxError."
    # NOTE(review): this runs under Python 2 semantics -- map() returns a
    # list here and callers rely on that; wrap in list() if porting to 3.
    require(x, x!=[])                    # () => Error
    if not isa(x, list):                 # constant => unchanged
        return x
    elif x[0] is _quote:                 # (quote exp)
        require(x, len(x)==2)
        return x
    elif x[0] is _if:
        if len(x)==3: x = x + [None]     # (if t c) => (if t c None)
        require(x, len(x)==4)
        return map(expand, x)
    elif x[0] is _set:
        require(x, len(x)==3);
        var = x[1]                       # (set! non-var exp) => Error
        require(x, isa(var, Symbol), "can set! only a symbol")
        return [_set, var, expand(x[2])]
    elif x[0] is _define or \
         x[0] is _definemacro or \
         x[0] is _defineactor:
        require(x, len(x)>=3)
        _def, v, body = x[0], x[1], x[2:]
        if isa(v, list) and v:           # (define (f args) body)
            f, args = v[0], v[1:]        #  => (define f (lambda (args) body))
            return expand([_def, f, [_lambda, args]+body])
        else:
            require(x, len(x)==3)        # (define non-var/list exp) => Error
            require(x, isa(v, Symbol), "can define only a symbol")
            exp = expand(x[2])
            if _def is _definemacro:
                require(x, toplevel, "define-macro only allowed at top level")
                # Macros are evaluated at expansion time and registered so
                # later forms can be rewritten by them.
                proc = eval(exp)
                require(x, callable(proc), "macro must be a procedure")
                macro_table[v] = proc    # (define-macro v proc)
                return None              #  => None; add v:proc to macro_table
            return [_define, v, exp]
    elif x[0] is _begin:
        if len(x)==1: return None        # (begin) => None
        else: return [expand(xi, toplevel) for xi in x]
    elif x[0] is _lambda:                # (lambda (x) e1 e2)
        require(x, len(x)>=3)            #  => (lambda (x) (begin e1 e2))
        vars, body = x[1], x[2:]
        require(x, (isa(vars, list) and all(isa(v, Symbol) for v in vars))
                or isa(vars, Symbol), "illegal lambda argument list")
        exp = body[0] if len(body) == 1 else [_begin] + body
        return [_lambda, vars, expand(exp)]
    elif x[0] is _quasiquote:            # `x => expand_quasiquote(x)
        require(x, len(x)==2)
        return expand_quasiquote(x[1])
    elif isa(x[0], Symbol) and x[0] in macro_table:
        return expand(macro_table[x[0]](*x[1:]), toplevel) # (m arg...)
    # Actor/concurrency special forms below: each validates its arity and
    # expands its argument expressions.
    elif x[0] is _spawn:
        require(x, len(x)>=2)
        return [_spawn] + map(expand, x[1:])
    elif x[0] is _join:
        require(x, len(x)>=2)
        return [_join] + map(expand, x[1:])
    elif x[0] is _value:
        require(x, len(x)==2)
        return [_value] + map(expand, x[1:])
    elif x[0] is _spawnactor:
        require(x, len(x)>=2)
        return [_spawnactor] + map(expand, x[1:])
    elif x[0] is _startactor:
        require(x, len(x)>=2)
        return [_startactor] + map(expand, x[1:])
    elif x[0] is _joinactor:
        require(x, len(x)>=2)
        return [_joinactor] + map(expand, x[1:])
    elif x[0] is _send:
        require(x, len(x)==3)
        return [_send] + map(expand, x[1:])
    elif x[0] is _rcv:
        require(x, len(x)==1)
        return x
    elif x[0] is _makemsg:
        require(x, len(x)==2)
        return [_makemsg] + map(expand, x[1:])
    elif x[0] is _getinfo:
        require(x, len(x)==2)
        return [_getinfo] + map(expand, x[1:])
    elif x[0] is _getsender:
        require(x, len(x)==2)
        return [_getsender] + map(expand, x[1:])
    else:                                #        => macroexpand if m isa macro
        return map(expand, x)            # (f arg...) => expand each
예제 #52
0
    def loadEditorSettingsFromPyExp(self, pyExp):
        """Convert *pyExp* into a buffer tree and evaluate it, returning
        the evaluation result (the editor settings)."""
        exp_root = tn.TNode(tn.createTNodeExpFromPyExp(pyExp))
        return eval.eval(buffer.BufferSexp(exp_root))
예제 #53
0
import table


if __name__ == "__main__":
	# Truncate both symbol files so each run starts from a clean slate.
	# (The original mixed tabs and spaces -- a TabError under Python 3 --
	# and left file handles open.)
	for symbol_file in ("symbol_temp.txt", "symbol_original.txt"):
		open(symbol_file, "w").close()

	# Evaluate every expression from the input file; eval.eval threads the
	# counter k through successive calls.
	with open('input.txt', 'r') as f:
		read = f.readlines()
	k = 0
	for input_exp in read:
		input_exp = str(input_exp)
		k = eval.eval(input_exp, k)

	# Collect the generated intermediate code, then delete the temp file
	# (os.remove is portable, unlike shelling out to 'rm').
	d = []
	with open('inout.txt', 'r') as f1:
		lines = f1.readlines()
	os.remove('inout.txt')
	for i in lines:
		i = i.split('\n')
		d.append(i[0])
예제 #54
0
파일: repl.py 프로젝트: frawgie/connive
def repl():
    while True:
        line = eval(parse(raw_input('$ ')))
        print line
예제 #55
0
파일: __init__.py 프로젝트: JanFan/AScheme
from parse import parse
from eval import eval

########## built-in macro

# Bootstrap the built-in 'and' macro by evaluating Scheme source at import
# time; define-macro registers it with the expander's macro table.
eval(parse("""(begin
;; and
(define-macro and (lambda args
   (if (null? args) #t
       (if (= (length args) 1) (car args)
           `(if ,(car args) (and ,@(cdr args)) #f)))))

;; More macros can also go here
)"""))

# Bootstrap the built-in 'cond' macro the same way.
eval(parse("""
;; cond
(define-macro cond (lambda args
  (if (not (null? args))
    (let ((hd (car args)))
      (if (= "else" (car hd))
        `(begin ,@(cdr hd))
        `(if ,(car hd)
             (begin ,@(cdr hd))
             (if ,(not (null? args))
               (cond ,@(cdr args)))))))))
"""))