Example #1
    def test_inverse(self):

        kerr_metric = metric.metric(1.9, 0.2, 0.7)
        kerr_inverse_metric = metric.inverse_metric(1.9, 0.2, 0.7)
        numpy_inverse = np.linalg.inv(kerr_metric)

        # Check that the calculated inverse is equal to numpy's inverse

        npt.assert_almost_equal(kerr_inverse_metric, numpy_inverse)
Example #2
def train():
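    # Fit an RBF-kernel SVM with balanced class weights on precomputed
    # features, save it with joblib, then report train-set accuracy, EER, and
    # HTER via the project's metric() helper.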
    train_feature,train_label = load_feature_label("train_feature.npy")
    model = SVC(kernel='rbf', C=1e3, gamma=0.5, class_weight='balanced', probability=True)
    model.fit(train_feature, train_label)
    joblib.dump(model, "./model.m")
    predict_proba = model.predict_proba(train_feature)
    predict = model.predict(train_feature)
    acc, eer, hter = metric(predict_proba, train_label)
    print("train acc is:%f eer is:%f hter is:%f"%(acc,eer,hter))
Example #3
    def test_mathematica_comparison(self):

        kerr_metric = metric.metric(1.9, 0.2, 0.7)
        mathematica_kerr_metric = np.array(
            [[-0.06877807693, 0, 0, -0.02572840654], [0, 13.60219981, 0, 0],
             [0, 0, 4.080659944, 0], [-0.02572840654, 0, 0, 0.1625358035]])

        # Check the nonzero components

        npt.assert_almost_equal(kerr_metric, mathematica_kerr_metric)
Example #4
    def test_mathematica_comparison(self):

        kerr_metric = metric.metric(1.9, 0.2, 0.7)
        mathematica_kerr_metric = np.array([[-0.06877807693, 0, 0, -0.02572840654],
                                            [0, 13.60219981, 0, 0],
                                            [0, 0, 4.080659944, 0],
                                            [-0.02572840654, 0, 0, 0.1625358035]])

        # Check the nonzero components

        npt.assert_almost_equal(kerr_metric, mathematica_kerr_metric)
Example #5
File: main.py Project: HBzju/OPLL
def run(data):
    # data = read_data(data_file, 2)
    n = data['X_train'].shape[0]
    p = data['X_train'].shape[1]
    # p is the feature number of X
    q = data['Y_train'].shape[1]
    # q is the feature number of Y
    # W = np.random.random((p + 1, q))
    W = np.zeros((p + 1, q))
    C = 1

    X_train = np.append(data['X_train'],
                        np.ones((data['X_train'].shape[0], 1)),
                        axis=1)

    for i in range(0, n):
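        # get_pos_neg (assumed to return one relevant and one irrelevant
        # label index) picks the label pair to compare; on a margin violation
        # the closed-form passive-aggressive style step tau updates the two
        # weight columns in opposite directions.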
        x = X_train[i]

        y = data['Y_train'][i]
        scores = W.T.dot(x)
        s_pos, s_neg = get_pos_neg(y, scores)

        l_t = 1 - scores[s_pos] + scores[s_neg]
        if l_t <= 0:
            continue
        tau = l_t / (2.0 * x.dot(x) + 1.0 / (2 * C))
        W[:, s_pos] += tau * x
        W[:, s_neg] -= tau * x

    scores = np.append(data['X_test'],
                       np.ones((data['X_test'].shape[0], 1)),
                       axis=1).dot(W)
    y_pred = np.zeros_like(scores).astype(int)
    indices = np.argmax(scores, axis=1)
    # print(indices)
    for i in range(0, y_pred.shape[0]):
        y_pred[i, indices[i]] = 1
    metrics = metric(data['Y_test'], y_pred)

    # for value in metrics:
    #     print('%0.4f\t' % value, end="")
    return metrics[0]
Example #6
def run_omd_l2(data):
    #data = read_data(data_file, 2)
    n = data['X_train'].shape[0]
    p = data['X_train'].shape[1]
    # p is the feature number of X
    q = data['Y_train'].shape[1]
    # q is the feature number of Y
    W = np.zeros((p + 1, q))
    Q = np.zeros_like(W)
    eta = 5e-3
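    # Online mirror descent: Q accumulates the (sub)gradient of the ranking
    # hinge loss, and W is recovered through an entropic mirror map,
    # W = exp(eta * Q - 1), rather than by a direct additive update.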

    X_train = np.append(data['X_train'],
                        np.ones((data['X_train'].shape[0], 1)),
                        axis=1)

    for i in range(0, n):
        x = X_train[i]
        y = data['Y_train'][i]
        scores = W.T.dot(x)
        s_pos, s_neg = get_pos_neg(y, scores)

        l_t = 1 - scores[s_pos] + scores[s_neg]
        if l_t <= 0:
            continue
        Q[:, s_pos] += x
        Q[:, s_neg] -= x
        W = np.exp(eta * Q - 1)

    scores = np.append(data['X_test'],
                       np.ones((data['X_test'].shape[0], 1)),
                       axis=1).dot(W)
    y_pred = np.zeros_like(scores).astype(int)
    indices = np.argmax(scores, axis=1)
    # print(indices)
    y_pred[np.arange(y_pred.shape[0]), indices] = 1
    metrics = metric(data['Y_test'], y_pred)
    # for value in metrics:
    #     print('%0.4f\t' % value, end="")
    return metrics[0]
Example #7
def check_all_pair(dic, i):
    k = open(html_path.format(lang='kor', idx=i), "r", encoding='UTF8')
    sources_k = BeautifulSoup(k, "html.parser")
    e = open(html_path.format(lang='eng', idx=i), "r", encoding='UTF8')
    sources_e = BeautifulSoup(e, "html.parser")

    k.close()
    e.close()
    # Metric evaluation factors
    t1 = reference.reference(sources_k, sources_e)
    t2 = tree_compare.tree_compare(sources_k, sources_e)
    t3 = photo_check.photo_check(sources_k, sources_e)
    t4 = check_translate_pair.check_translate_pair(sources_k)
    t5 = paragraph.paragraph(sources_k, sources_e)
    t6 = reading.reading(sources_k, sources_e)

    ck_link_list = [[]]
    e_link_list = [[]]

    if t5 == -1:
        return -1, -1, -1

    #Metric
    metric_result = metric.metric(t1, t2, t3, t4, t5, t6)
    #print (metric_result)

    ck = header.header(sources_k, sources_e, i, metric_result)
    if ck == -1:
        return -1, -1, -1
    else:
        k_link_list, e_link_list = header_for_link.header_for_link(
            sources_k, sources_e, i, metric_result)
        if k_link_list == -1:
            return -1, -1, -1
    ck_link_list = translate_k_to_e.translate_k_to_e(dic, k_link_list)

    return ck_link_list, e_link_list, metric_result
Example #8
import tensorflow as tf
import json
import os
import argparse
import random
from data_helper import tf_idf_data_helper
from dnn import dnn
from metric import metric

metric = metric()
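# NOTE: the line above rebinds the imported name 'metric' to an instance, so
# later uses in this module go through the metric instance, not the class.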
rate = 0.8

class DNN_trainer:
    def __init__(self, arg):
        self.arg = arg
        with open(self.arg.config_path, 'r', encoding='utf-8') as f:
            self.config = json.load(f)

        # data_helper definition
        self.data_loader_obj = tf_idf_data_helper(stop_word_path=self.config['stop_word_path'], low_freq=self.config['low_freq'])

        # all data
        self.all_data, self.all_labels = self.data_loader_obj.gen_data(self.config['train_data'])
        self.tf_idf_len = self.data_loader_obj.dictionary_len
        self.class_nums = self.data_loader_obj.class_nums
        print("<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>")
        print("class nums :{}".format(self.class_nums))
        print("<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>")

        # train data
        self.train_data_len = int(len(self.all_data)*rate)
Example #9
    def evaluate(self, eval_data, test=False):
        # ========================================
        #             Validation / Test
        # ========================================
        # After the completion of each training epoch, measure our performance on
        # our validation set.
        # Also applicable to test set.
        t0 = time.time()

        if test:
            if self.load_model_path:
                self.model = torch.load(self.load_model_path +
                                        self.model_name + ".pt")
            elif eval_data == "HiEve":
                self.model = torch.load(self.HiEve_best_PATH)
            else:  # MATRES
                self.model = torch.load(self.MATRES_best_PATH)
            self.model.to(self.cuda)
            print("")
            print("loaded " + eval_data + " best model:" + self.model_name +
                  ".pt")
            print("(from epoch " + str(self.best_epoch) + " )")
            print("Running Evaluation on " + eval_data + " Test Set...")
            if eval_data == "MATRES":
                dataloader = self.test_dataloader_MATRES
            else:
                dataloader = self.test_dataloader_HIEVE
        else:
            # Evaluation
            print("")
            print("Running Evaluation on Validation Set...")
            if eval_data == "MATRES":
                dataloader = self.valid_dataloader_MATRES
            else:
                dataloader = self.valid_dataloader_HIEVE

        self.model.eval()

        y_pred = []
        y_gold = []
        # Evaluate data for one epoch
        for batch in dataloader:
            x_sent = batch[3].to(self.cuda)
            y_sent = batch[4].to(self.cuda)
            z_sent = batch[5].to(self.cuda)
            x_position = batch[6].to(self.cuda)
            y_position = batch[7].to(self.cuda)
            z_position = batch[8].to(self.cuda)
            xy = batch[12].to(self.cuda)
            yz = batch[13].to(self.cuda)
            xz = batch[14].to(self.cuda)
            flag = batch[15].to(self.cuda)
            with torch.no_grad():
                if self.finetune:
                    alpha_logits, beta_logits, gamma_logits = self.model(
                        x_sent,
                        y_sent,
                        z_sent,
                        x_position,
                        y_position,
                        z_position,
                        xy,
                        yz,
                        xz,
                        flag,
                        loss_out=None)
                else:
                    with torch.no_grad():
                        x_sent_e = self.my_func(x_sent)
                        y_sent_e = self.my_func(y_sent)
                        z_sent_e = self.my_func(z_sent)
                    alpha_logits, beta_logits, gamma_logits = self.model(
                        x_sent_e,
                        y_sent_e,
                        z_sent_e,
                        x_position,
                        y_position,
                        z_position,
                        xy=xy,
                        yz=yz,
                        xz=xz,
                        flag=flag,
                        loss_out=None)
            # Move logits and labels to CPU
            label_ids = xy.to('cpu').numpy()
            y_predict = torch.max(alpha_logits, 1).indices.cpu().numpy()
            y_pred.extend(y_predict)
            y_gold.extend(label_ids)

        # Measure how long the validation run took.
        validation_time = format_time(time.time() - t0)
        print("Eval took: {:}".format(validation_time))

        if eval_data == "MATRES":
            Acc, P, R, F1, CM = metric(y_gold, y_pred)
            print("  P: {0:.3f}".format(P))
            print("  R: {0:.3f}".format(R))
            print("  F1: {0:.3f}".format(F1))
            if test:
                print("Test result:", file=self.file)
                print("  P: {0:.3f}".format(P), file=self.file)
                print("  R: {0:.3f}".format(R), file=self.file)
                print("  F1: {0:.3f}".format(F1), file=self.file)
                print("  Confusion Matrix", file=self.file)
                print(CM, file=self.file)
            if not test:
                if F1 > self.MATRES_best_micro_F1 or not path.exists(
                        self.MATRES_best_PATH):
                    self.MATRES_best_micro_F1 = F1
                    self.MATRES_best_cm = CM
                    ### save model parameters to .pt file ###
                    torch.save(self.model, self.MATRES_best_PATH)
                    return 1

        if eval_data == "HiEve":
            # Report the final accuracy for this validation run.
            cr = classification_report(y_gold, y_pred, output_dict=True)
            rst = classification_report(y_gold, y_pred)
            F1_PC = cr['0']['f1-score']
            F1_CP = cr['1']['f1-score']
            F1_coref = cr['2']['f1-score']
            F1_NoRel = cr['3']['f1-score']
            F1_PC_CP_avg = (F1_PC + F1_CP) / 2.0
            print(rst)
            print("  F1_PC_CP_avg: {0:.3f}".format(F1_PC_CP_avg))
            if test:
                print("  rst:", file=self.file)
                print(rst, file=self.file)
                print("  F1_PC_CP_avg: {0:.3f}".format(F1_PC_CP_avg),
                      file=self.file)
            if not test:
                if F1_PC_CP_avg > self.HiEve_best_F1 or not path.exists(
                        self.HiEve_best_PATH):
                    self.HiEve_best_F1 = F1_PC_CP_avg
                    self.HiEve_best_prfs = rst
                    torch.save(self.model, self.HiEve_best_PATH)
                    return 1
        return 0
Example #10
 ACC = []
 R = []
 J = []
 ARI = []
 FM = []
 F1 = []
 Hubert = []
 K = []
 RT = []
 CD = []
 SS = []
 NMI = []
 Num = 20
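 # Train and evaluate Num independent runs so each clustering metric can be
 # reported as a mean with a standard deviation.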
 for _ in tqdm(range(Num)):
     net = Net([graph, g[0][0], g[1][0]], parser)
     pred = train(net, graph, parser, [features, g[0][1], g[1][1]])
     pred = np.argmax(pred, axis=1)
     AACC, RR, JJ, AARI, FFM, FF1, HHubert, KK, RRT, CCD, SSS, NNMI = metric(
         labels, pred)
     ACC.append(AACC)
     R.append(RR)
     J.append(JJ)
     ARI.append(AARI)
     F1.append(FF1)
     FM.append(FFM)
     Hubert.append(HHubert)
     K.append(KK)
     RT.append(RRT)
     CD.append(CCD)
     SS.append(SSS)
     NMI.append(NNMI)
 end = time.time()
 print('The data is {} and the results of SDCN are as follows:'.format(name))
 print('ACC={}$\pm${}'.format(round(np.mean(ACC), 4), round(np.std(ACC), 4)))
Example #11
def run_train(config):
    model_name = f'{config.model}_{config.image_mode}_{config.image_size}'
    if 'FaceBagNet' not in config.model:
        model_name += f'_{config.patch_size}'
    config.save_dir = os.path.join(config.save_dir, model_name)

    initial_checkpoint = config.pretrained_model
    criterion          = softmax_cross_entropy_criterion

    ## setup  -----------------------------------------------------------------------------
    if not os.path.exists(config.save_dir +'/checkpoint'):
        os.makedirs(config.save_dir +'/checkpoint')
    if not os.path.exists(config.save_dir +'/backup'):
        os.makedirs(config.save_dir +'/backup')

    log = Logger()
    log.open(os.path.join(config.save_dir,model_name+'.txt'),mode='a')
    log.write('\tconfig.save_dir      = %s\n' % config.save_dir)
    log.write('\n')
    log.write('\t<additional comments>\n')
    log.write('\t  ... xxx baseline  ... \n')
    log.write('\n')

    ## dataset ----------------------------------------
    log.write('** dataset setting **\n')
    train_dataset = FDDataset(mode = 'train', image_size=config.image_size,
                              fold_index=config.train_fold_index)
    train_loader  = DataLoader(train_dataset,
                                shuffle=True,
                                batch_size  = config.batch_size,
                                drop_last   = True,
                                num_workers = config.num_workers)

    valid_dataset = FDDataset(mode = 'val', image_size=config.image_size,
                              fold_index=config.train_fold_index)
    valid_loader  = DataLoader(valid_dataset,
                                shuffle=False,
                                batch_size  = config.batch_size // 36,
                                drop_last   = False,
                                num_workers = config.num_workers)

    assert(len(train_dataset)>=config.batch_size)
    log.write('batch_size = %d\n'%(config.batch_size))
    log.write('train_dataset : \n%s\n'%(train_dataset))
    log.write('valid_dataset : \n%s\n'%(valid_dataset))
    log.write('\n')
    log.write('** net setting **\n')

    net = get_fusion_model(model_name=config.model, image_size=config.image_size, patch_size=config.patch_size)
    print(net)
    net = torch.nn.DataParallel(net)
    net = net.cuda()

    if initial_checkpoint is not None:
        initial_checkpoint = os.path.join(config.save_dir +'/checkpoint',initial_checkpoint)
        print('\tinitial_checkpoint = %s\n' % initial_checkpoint)
        net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))

    log.write('%s\n'%(type(net)))
    log.write('\n')

    iter_smooth = 20
    start_iter = 0
    log.write('\n')

    ## start training here! ##############################################
    log.write('** start training here! **\n')
    log.write('                                  |------------ VALID -------------|-------- TRAIN/BATCH ----------|         \n')
    log.write('model_name   lr   iter  epoch     |     loss      acer      acc    |     loss              acc     |  time   \n')
    log.write('----------------------------------------------------------------------------------------------------\n')

    train_loss   = np.zeros(6,np.float32)
    valid_loss   = np.zeros(6,np.float32)
    batch_loss   = np.zeros(6,np.float32)
    iter = 0
    i    = 0

    start = timer()
    #-----------------------------------------------
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=0.1, momentum=0.9, weight_decay=0.0005)

    sgdr = CosineAnnealingLR_with_Restart(optimizer,
                                          T_max=config.cycle_inter,
                                          T_mult=1,
                                          model=net,
                                          take_snapshot=False,
                                          out_dir=None,
                                          eta_min=1e-3)
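
    # Cosine annealing with warm restarts: config.cycle_num cycles of
    # config.cycle_inter epochs each. Validation runs only in the second half
    # of a cycle, checkpointing the best ACER per cycle and the best overall.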

    global_min_acer = 1.0
    for cycle_index in range(config.cycle_num):
        print('cycle index: ' + str(cycle_index))
        min_acer = 1.0

        for epoch in range(0, config.cycle_inter):
            sgdr.step()
            lr = optimizer.param_groups[0]['lr']
            print('lr : {:.4f}'.format(lr))

            sum_train_loss = np.zeros(6,np.float32)
            sum = 0
            optimizer.zero_grad()

            for input, truth in train_loader:
                iter = i + start_iter
                # one iteration update  -------------
                net.train()
                input = input.cuda()
                truth = truth.cuda()

                logit = net.forward(input)
                truth = truth.view(logit.shape[0])

                loss  = criterion(logit, truth)
                precision,_ = metric(logit, truth)

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                # print statistics  ------------
                batch_loss[:2] = np.array(( loss.item(), precision.item(),))
                sum += 1
                if iter % iter_smooth == 0:
                    train_loss = sum_train_loss/sum
                    sum = 0

                i = i + 1

            if epoch >= config.cycle_inter // 2:
                net.eval()
                valid_loss, _ = do_valid_test(net, valid_loader, criterion)
                net.train()

                if valid_loss[1] < min_acer and epoch > 0:
                    min_acer = valid_loss[1]
                    ckpt_name = config.save_dir + '/checkpoint/Cycle_' + str(cycle_index) + '_min_acer_model.pth'
                    torch.save(net.state_dict(), ckpt_name)
                    log.write('save cycle ' + str(cycle_index) + ' min acer model: ' + str(min_acer) + '\n')

                if valid_loss[1] < global_min_acer and epoch > 0:
                    global_min_acer = valid_loss[1]
                    ckpt_name = config.save_dir + '/checkpoint/global_min_acer_model.pth'
                    torch.save(net.state_dict(), ckpt_name)
                    log.write('save global min acer model: ' + str(global_min_acer) + '\n')

            asterisk = ' '
            log.write(model_name+' Cycle %d: %0.4f %5.1f %6.1f | %0.6f  %0.6f  %0.3f %s  | %0.6f  %0.6f |%s \n' % (
                cycle_index, lr, iter, epoch,
                valid_loss[0], valid_loss[1], valid_loss[2], asterisk,
                batch_loss[0], batch_loss[1],
                time_to_str((timer() - start), 'min')))

        ckpt_name = config.save_dir + '/checkpoint/Cycle_' + str(cycle_index) + '_final_model.pth'
        torch.save(net.state_dict(), ckpt_name)
        log.write('save cycle ' + str(cycle_index) + ' final model \n')
Example #12
def main():
    htmlFileDir = '../../data/cleanEval'
    htmlFileDir = '../../data/SSD/Big5/techweb.com'
    #htmlFileDir = '../../data/SSD/myriad40'
    '''
    for num in range(0, 10):
        htmlFilePath = path.join(htmlFileDir, str(num+1)+'.html')
        try:
            (oLinkMatrix, oGroundList, oClusterIndex) = genMatrix([bs(open(htmlFilePath))])
            scaler = MinMaxScaler()
            linkMatrix = scaler.fit_transform(oLinkMatrix)
            est = KMeans(n_clusters=2)
            y = est.fit_predict(linkMatrix)
            print(metric(y, oGroundList))
        except:
            print(tb.format_exc())
            continue
    '''
    domList = []
    total = 0
    testRatio = 0.5
    search = False
    for num in range(0, 100):
        htmlFilePath = path.join(htmlFileDir, str(num + 1) + '.html')
        try:
            domList.append(bs(open(htmlFilePath)))
            total += len(getLink(domList[-1]))
        except:
            continue
    print(total)
    (oLinkMatrix, oGroundList, oClusterIndex) = genMatrix(domList)

    dataList = [[oLinkMatrix[i], oGroundList[i]]
                for i in range(0, len(oGroundList))]
    job_n = 16
    trainRatioList = [
        float(i) / 100 for i in list(range(1, 11)) + list(range(10, 101, 10))
    ]
    for trainRatio in trainRatioList:
        turnNum = 100
        turn = 0
        precision = recall = f1_score = accuracy = 0
        while turn < turnNum:
            try:
                random.shuffle(dataList)
                linkMatrix = [dataList[i][0] for i in range(0, len(dataList))]
                groundList = [dataList[i][1] for i in range(0, len(dataList))]
                testBound = int(testRatio * len(groundList))
                upperBound = int(trainRatio * (1 - testRatio) *
                                 len(groundList))
                scaler = StandardScaler()
                linkMatrix = scaler.fit_transform(linkMatrix)
                grid = None
                if search:
                    C_range = np.logspace(-2, 10, 13)
                    gamma_range = np.logspace(-9, 3, 13)
                    param_grid = dict(gamma=gamma_range, C=C_range)
                    cv = StratifiedShuffleSplit(groundList[0:upperBound],
                                                n_iter=5,
                                                test_size=0.2,
                                                random_state=42)
                    grid = GridSearchCV(SVC(),
                                        param_grid=param_grid,
                                        cv=cv,
                                        n_jobs=job_n)
                    grid.fit(linkMatrix[0:upperBound],
                             groundList[0:upperBound])
                    clf = SVC(C=grid.best_params_['C'],
                              gamma=grid.best_params_['gamma'])
                else:
                    C = float(upperBound / sum(groundList[0:upperBound]))
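                    # NOTE: this class-balance value is computed but never
                    # passed to the SVC below, which trains with a fixed C=1.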
                    clf = SVC(C=1, gamma=0.10, kernel='rbf')
                clf.fit(linkMatrix[0:upperBound], groundList[0:upperBound])
                predict = clf.predict(linkMatrix[testBound + 1:])
                tmp = metric(predict, groundList[testBound + 1:])
                precision += tmp[0]
                recall += tmp[1]
                f1_score += tmp[2]
                accuracy += tmp[3]
                turn += 1
            except:
                continue
        print("%s, %s, %s, %s" % (precision / turnNum, recall / turnNum,
                                  f1_score / turnNum, accuracy / turnNum))
Example #13
 g.add_edges_from(data)
 del data
 k_clusters = len(np.unique(labels))
 parameters = Parameters(beta=0.5,
                         gamma=0.5,
                         fun='softmax',
                         d=k_clusters,
                         k=k_clusters,
                         layers=[200, 170, 140, 100],
                         cat='CAT')  # Set the parameters of SDGE 100
 num = 20
 JJ = []
 FMM = []
 FF1 = []
 KK = []
 print('The implementation is running on', (chr(0x266B) + ' ') * 45)
 for t in tqdm(range(num)):
     model = SDGE(g, X, parameters, t + 1)
     Z = model.fit()
     y_pred = model.predict(Z)
     J, FM, F1, K = metric.metric(labels, y_pred)
     JJ.append(J)
     FMM.append(FM)
     FF1.append(F1)
     KK.append(K)
 print('J=', round(np.mean(JJ), 4), '$\pm$', round(np.std(JJ), 4))
 print('FM=', round(np.mean(FMM), 4), '$\pm$', round(np.std(FMM), 4))
 print('F1=', round(np.mean(FF1), 4), '$\pm$', round(np.std(FF1), 4))
 print('K=', round(np.mean(KK), 4), '$\pm$', round(np.std(KK), 4))
 end = time.time()
 print('The time cost is', (end - start) / num)
Example #14
def main(args):
    if args.tag:
        args.tag = args.model_dataset + time.strftime("%m%d%H%M%S", time.localtime()) + "-" + args.tag
    else:
        # args.tag = args.model_dataset+time.strftime("%m%d%H%M%S",time.localtime())
        args.tag = args.model_dataset

    if args.setting:
        funcs = dir(hparams)
        if args.setting not in funcs:
            raise ValueError("Unknown setting %s"%args.setting)
        
        print("Loading predefined hyperparameter setting %s"%args.setting)
        args = getattr(hparams, args.setting)(args)

    print(args)
    train_iter, valid_iter, test_iter = get_dataloader(args.dataset, args.batch_size, n_vocab=args.n_vocab, cached_path=args.data_cache,share_embed=args.share_embed)
    n_vocab=None
    if args.share_embed:
        n_src_vocab = train_iter.dataset.src_lang.size
        n_trg_vocab = n_src_vocab
        print("# of vocabulary: %d"%n_src_vocab)
    else:
        n_src_vocab = train_iter.dataset.src_lang.size
        n_trg_vocab = train_iter.dataset.trg_lang.size
        print("# of source vocabulary: %d"%n_src_vocab)
        print("# of target vocabulary: %d"%n_trg_vocab)
    
    args.n_src_vocab = n_src_vocab
    args.n_trg_vocab = n_trg_vocab

    model = load_model(args)
    if args.init:
        print("apply weight initialization method: %s"%args.init)
        init_weight(args, model)
        
    if args.label_smooth>0:
        print("using Cross Entropy Loss with Label Smoothing factor %f"%args.label_smooth)
        loss_fn = CEWithLabelSmoothing(n_trg_vocab,label_smoothing=args.label_smooth, ignore_index=C.PAD)
        ce_loss_fn = torch.nn.CrossEntropyLoss(ignore_index=C.PAD)
    else:
        print("using Cross Entropy Loss")
        loss_fn = torch.nn.CrossEntropyLoss(ignore_index=C.PAD)
        ce_loss_fn = loss_fn
    
    # optimizer related stuff
    if args.optim=="adam":
        print("Optimizer: ", args.optim)
        optim = torch.optim.Adam(model.parameters(),lr=args.lr)
    elif args.optim =="adamw":
        optim = torch.optim.AdamW(model.parameters(), lr=args.lr)
    else:
        raise ValueError("Unknown optimizer type: %s"%args.optim)

    # learning rate scheduler related stuff
    if args.lr_decay == "noam":
        print("Learning rate scheduler: ",args.lr_decay)
        scheduler = Noam(optim, args.warmup, args.d_model)
    elif args.lr_decay =="none":
        scheduler = torch.optim.lr_scheduler.StepLR(optim, float("inf"))
    else:
        raise ValueError("Unknown lr decay type: %s"%args.lr_decay)
    
    if args.warm_start:
        args, model, optim = load_ckpt(args.warm_start, optim)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, verbose=True)
    
    # if args.warm_start:
    #     args, model, optim = load_ckpt(args.warm_start, optim)

    # model.to(args.device)
    model.cuda()
    print(f'[!] model:')
    print(model)

    # show_training_profile(args, model)
    
    best_valid_loss = float("inf")
    best_ckpt=-1
    best_ckpt_path = os.path.join(C.model_save_path, args.tag, "ckpt.ep.best")
    patience_cnt=0
    print("="*10+"start_training"+"="*10)
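    # Training loop: a checkpoint is saved every epoch (unless args.debug is
    # set), a "ckpt.ep.best" symlink tracks the epoch with the lowest
    # validation loss, and training stops early once args.patience epochs
    # pass without improvement.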
    for i_epoch in range(args.epoch):
        if args.patience!=-1 and patience_cnt >= args.patience:
            print("MAX PATIENCE REACHED!")
            break

        # loss_record=[]
        pbar = tqdm(train_iter, total=len(train_iter))
        cnt=0
        for i, batch in enumerate(pbar):
            try:
                loss = train_step(model, batch, loss_fn, optim, device=args.device, act_loss_weight=args.act_loss_weight, norm=args.grad_norm)
                lr = optim.param_groups[0]["lr"]
            except Exception as e:
                print(e)
                exit()

            if isinstance(loss_fn, torch.nn.CrossEntropyLoss):
                ppl = np.exp(loss)
                pbar.set_description("[%3d/%3d] PPL: %f, lr: %.6f"%(i_epoch,args.epoch, ppl, lr))
            elif isinstance(loss_fn, CEWithLabelSmoothing):
                pbar.set_description("[%3d/%3d] KL: %f, lr: %.6f"%(i_epoch,args.epoch,loss, lr))
            
            scheduler.step()
            # tensorboard related stuff
            # writer.add_scalar('training loss', loss, i_epoch * len(train_iter) + i)
            # writer.add_scalar('training ppl', ppl, i_epoch * len(train_iter) + i)

        # ppl = np.exp(np.mean(loss_record))
        # print("Epoch: [%2d/%2d], Perplexity: %f, loss: %f"%(i_epoch, args.epoch, ppl, np.mean(loss_record)))

        if not args.debug:
            os.makedirs(os.path.join(C.model_save_path, args.tag), exist_ok=True)
            model_save_path = os.path.join(C.model_save_path, args.tag, "ckpt.ep.%d"% i_epoch)
            if args.share_embed:
                save_ckpt(model_save_path, args, model, optim, train_iter.dataset.src_lang)
            else:
                save_ckpt(model_save_path,args,model,optim,[train_iter.dataset.src_lang, train_iter.dataset.trg_lang])
            
            print("model saved at %s"%model_save_path)

        valid_loss = valid_step(model, valid_iter, ce_loss_fn, device=args.device)
        if not args.debug and valid_loss < best_valid_loss:
            print("new best checkpoint!")
            if os.path.exists(best_ckpt_path):
                os.remove(best_ckpt_path)
            os.symlink("ckpt.ep.%d"%i_epoch, best_ckpt_path)
            best_valid_loss = valid_loss
            best_ckpt = i_epoch
            patience_cnt=0
        else:
            patience_cnt+=1

        if isinstance(ce_loss_fn, torch.nn.CrossEntropyLoss):
            print("Validation Perplexity: %f"%(np.exp(valid_loss)))
        else:
            print("Validation loss: %f"%valid_loss)
    
    print("best checkpoint is %d"%best_ckpt)
    _, model, _, _ = load_ckpt(best_ckpt_path, False)
    print("testing...")
    results = batch_evaluate(model, test_iter,train_iter.dataset.src_lang, train_iter.dataset.trg_lang,device=args.device,beam_size=5)
    print("computing BLEU score...")
    bleu_score = metric(results, metric_type="bleu")
    print("BLEU: %f"%bleu_score)
Example #15
                                         B, x_var, y_var, bearing_var)
            clean_agent = clean_tracker_agent([tracker_object])
            temp_sensor_object.set_tracker_objects(clean_agent)
            s.append(temp_sensor_object)

        #Finally, initialize fusion object
        fusion_agent = centralized_fusion(window_size, window_lag,
                                          MAX_UNCERTAINTY, num_sensors,
                                          init_target_estimate_for_fusion,
                                          init_target_cov_for_fusion)

        #measure = measurement(bearing_var)

        episode_condition = True
        n = 0
        metric_obj = metric(1, num_sensors)
        episode_state = []
        for sensor_index in range(0, num_sensors):
            episode_state.append([])
        while episode_condition:
            t[0].update_location()
            if t[0].current_location[0] > scen.x_max or t[0].current_location[
                    0] < scen.x_min or t[0].current_location[
                        1] > scen.y_max or t[0].current_location[
                            1] < scen.y_min:
                break

            #m.append(measure.generate_bearing(t.current_location, s.current_location))
            for sensor_index in range(0, num_sensors):
                s[sensor_index].gen_measurements(t, measurement(bearing_var),
                                                 1, 0)
Example #16
            np.random.normal(0, 5)
        ]
        A, B = t[0].constant_velocity(1E-10)  # Get motion model
        x_var = t[0].x_var
        y_var = t[0].y_var
        tracker_object = EKF_tracker(init_for_tracker, init_covariance, A, B,
                                     x_var, y_var, bearing_var)
        clean_agent = clean_tracker_agent([tracker_object])
        temp_sensor_object.set_tracker_objects(clean_agent)

        s = temp_sensor_object
        measure = measurement(bearing_var)

        episode_condition = True
        n = 0
        metric_obj = metric(1, 1)
        episode_state = []
        while episode_condition:
            t[0].update_location()
            #m.append(measure.generate_bearing(t.current_location, s.current_location))
            s.gen_measurements(t, measure, 1, 0)
            s.update_track_estimaes()
            #Move the sensor
            input_states = s.move_sensor(scen, params, v_max, coeff, alpha1,
                                         alpha2, alpha1_, alpha2_, sigma)
            episode_state.append(input_states[0])
            #Calculate immediate reward
            s.gen_sensor_reward(MAX_UNCERTAINTY, window_size, window_lag)

            metric_obj.update_truth_estimate_metrics(t, [s])
Example #17
    def evaluate(self, eval_data, test=False, predict=False):
        # ========================================
        #             Validation / Test
        # ========================================
        # After the completion of each training epoch, measure our performance on
        # our validation set.
        # Also applicable to test set.
        t0 = time.time()

        if test:
            if self.load_model_path:
                self.model = torch.load(self.load_model_path +
                                        self.model_name + ".pt")
            elif eval_data == "HiEve":
                self.model = torch.load(self.HiEve_best_PATH)
            else:  # MATRES
                self.model = torch.load(self.MATRES_best_PATH)
            self.model.to(self.cuda)
            print("")
            print("loaded " + eval_data + " best model:" + self.model_name +
                  ".pt")
            if predict == False:
                print("(from epoch " + str(self.best_epoch) + " )")
            print("Running Evaluation on " + eval_data + " Test Set...")
            if eval_data == "MATRES":
                dataloader = self.test_dataloader_MATRES
            else:
                dataloader = self.test_dataloader_HIEVE
        else:
            # Evaluation
            print("")
            print("Running Evaluation on Validation Set...")
            if eval_data == "MATRES":
                dataloader = self.valid_dataloader_MATRES
            else:
                dataloader = self.valid_dataloader_HIEVE

        self.model.eval()

        y_pred = []
        y_gold = []
        y_logits = np.array([[0.0, 1.0, 2.0, 3.0]])
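        # The first row is a placeholder; softmax outputs are appended below,
        # and the instance count reported later subtracts this row.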
        softmax = nn.Softmax(dim=1)
        # Evaluate data for one epoch
        for batch in dataloader:
            x_sent = batch[3].to(self.cuda)
            y_sent = batch[4].to(self.cuda)
            z_sent = batch[5].to(self.cuda)
            x_position = batch[6].to(self.cuda)
            y_position = batch[7].to(self.cuda)
            z_position = batch[8].to(self.cuda)
            xy = batch[12].to(self.cuda)
            yz = batch[13].to(self.cuda)
            xz = batch[14].to(self.cuda)
            flag = batch[15].to(self.cuda)
            with torch.no_grad():
                if self.finetune:
                    alpha_logits, beta_logits, gamma_logits = self.model(
                        x_sent,
                        y_sent,
                        z_sent,
                        x_position,
                        y_position,
                        z_position,
                        xy,
                        yz,
                        xz,
                        flag,
                        loss_out=None)
                else:
                    with torch.no_grad():
                        x_sent_e = self.my_func(x_sent)
                        y_sent_e = self.my_func(y_sent)
                        z_sent_e = self.my_func(z_sent)
                    alpha_logits, beta_logits, gamma_logits = self.model(
                        x_sent_e,
                        y_sent_e,
                        z_sent_e,
                        x_position,
                        y_position,
                        z_position,
                        xy=xy,
                        yz=yz,
                        xz=xz,
                        flag=flag,
                        loss_out=None)

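            # With joint training the classifier head emits 8 logits: columns
            # 0-3 are the HiEve labels and 4-7 the MATRES labels, so narrow()
            # keeps the 4 columns for the dataset being evaluated.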
            if self.dataset == "Joint":
                assert list(alpha_logits.size())[1] == 8
                if eval_data == "MATRES":
                    alpha_logits = torch.narrow(alpha_logits, 1, 4, 4)
                else:
                    alpha_logits = torch.narrow(alpha_logits, 1, 0, 4)
            else:
                assert list(alpha_logits.size())[1] == 4
            # Move logits and labels to CPU
            label_ids = xy.to('cpu').numpy()
            y_predict = torch.max(alpha_logits, 1).indices.cpu().numpy()
            y_pred.extend(y_predict)
            y_gold.extend(label_ids)
            y_logits = np.append(y_logits,
                                 softmax(alpha_logits).cpu().numpy(), 0)

        # Measure how long the validation run took.
        validation_time = format_time(time.time() - t0)
        print("Eval took: {:}".format(validation_time))

        if predict:
            with open(predict, 'w') as outfile:
                if eval_data == "MATRES":
                    numpyData = {
                        "labels":
                        "0 -- Before; 1 -- After; 2 -- Equal; 3 -- Vague",
                        "array": y_logits
                    }
                else:
                    numpyData = {
                        "labels":
                        "0 -- Parent-Child; 1 -- Child-Parent; 2 -- Coref; 3 -- NoRel",
                        "array": y_logits
                    }
                json.dump(numpyData, outfile, cls=NumpyArrayEncoder)
            msg = message(subject=eval_data + " Prediction Notice",
                          text=self.dataset + "/" + self.model_name +
                          " Predicted " + str(y_logits.shape[0] - 1) +
                          " instances. (Current Path: " + os.getcwd() + ")")
            send(msg)  # and send it
            return 0

        if eval_data == "MATRES":
            Acc, P, R, F1, CM = metric(y_gold, y_pred)
            print("  P: {0:.3f}".format(P))
            print("  R: {0:.3f}".format(R))
            print("  F1: {0:.3f}".format(F1))
            if test:
                print("Test result:", file=self.file)
                print("  P: {0:.3f}".format(P), file=self.file)
                print("  R: {0:.3f}".format(R), file=self.file)
                print("  F1: {0:.3f}".format(F1), file=self.file)
                print("  Confusion Matrix", file=self.file)
                print(CM, file=self.file)
                msg = message(subject=eval_data + " Test Notice",
                              text=self.dataset + "/" + self.model_name +
                              " Test results:\n" + "  P: {0:.3f}\n".format(P) +
                              "  R: {0:.3f}\n".format(R) +
                              "  F1: {0:.3f}".format(F1) + " (Current Path: " +
                              os.getcwd() + ")")
                send(msg)  # and send it
            if not test:
                if F1 > self.MATRES_best_micro_F1 or not path.exists(
                        self.MATRES_best_PATH):
                    self.MATRES_best_micro_F1 = F1
                    self.MATRES_best_cm = CM
                    ### save model parameters to .pt file ###
                    torch.save(self.model, self.MATRES_best_PATH)
                    return 1

        if eval_data == "HiEve":
            # Report the final accuracy for this validation run.
            cr = classification_report(y_gold, y_pred, output_dict=True)
            rst = classification_report(y_gold, y_pred)
            F1_PC = cr['0']['f1-score']
            F1_CP = cr['1']['f1-score']
            F1_coref = cr['2']['f1-score']
            F1_NoRel = cr['3']['f1-score']
            F1_PC_CP_avg = (F1_PC + F1_CP) / 2.0
            print(rst)
            print("  F1_PC_CP_avg: {0:.3f}".format(F1_PC_CP_avg))
            if test:
                print("  rst:", file=self.file)
                print(rst, file=self.file)
                print("  F1_PC_CP_avg: {0:.3f}".format(F1_PC_CP_avg),
                      file=self.file)
                msg = message(subject=eval_data + " Test Notice",
                              text=self.dataset + "/" + self.model_name +
                              " Test results:\n" +
                              "  F1_PC_CP_avg: {0:.3f}".format(F1_PC_CP_avg) +
                              " (Current Path: " + os.getcwd() + ")")
                send(msg)  # and send it
            if not test:
                if F1_PC_CP_avg > self.HiEve_best_F1 or not path.exists(
                        self.HiEve_best_PATH):
                    self.HiEve_best_F1 = F1_PC_CP_avg
                    self.HiEve_best_prfs = rst
                    torch.save(self.model, self.HiEve_best_PATH)
                    return 1
        return 0
Example #18
                init_for_sensor = []
                init_cov_for_sensor = []
                for target_counter in range(0, num_targets):
                    init_for_sensor.append([
                        x[target_counter] + np.random.normal(0, 5),
                        y[target_counter] + np.random.normal(0, 5),
                        np.random.normal(0, .1),
                        np.random.normal(0, .1)
                    ])
                    init_cov_for_sensor.append(np.array(init_covariance))
                fusion_agent = centralized_fusion(len(s), init_for_sensor,
                                                  init_cov_for_sensor)
                ##################

                episode_condition = True
                metric_obj = metric(num_targets, num_sensors)
                n = 0

                while episode_condition:
                    #Update locations of targets

                    for i in range(0, num_targets):
                        t[i].update_location()
                    #generate measurements for each sensor
                    for sensor_index in range(0, num_sensors):
                        s[sensor_index].gen_measurements(t, measure, pd, landa)
                    #Run tracker for each sensor

                    for sensor_index in range(0, num_sensors):
                        #Update tracks
                        s[sensor_index].update_track_estimaes()
Example #19
    def __init__(self, prefix):
        const.__init__(self,prefix + 'unit');

        # #### BEGIN OF UNIT CREATION #### #

        # for each submodule, we pass in self to allow dependence on values
        # previously set.

        # conversion factor
        self.radian = Quantity(1,{'radian':1});
        self.radians = self.radian;
        self.rad = self.radian;
        self.steradian = Quantity(1,{'steradian':1});
        self.steradians = self.steradian;
        self.sr = self.steradian;

        # money
        self.dollar = Quantity(1,{'dollar':1});
        self.dollars = self.dollar;
        self.cent = self.dollar/100.0;
        self.cents = self.cent;


        self.dozen = 12.0;
        self.doz = self.dozen;
        self.dz = self.dozen;

        self.gross = 12.0*self.dozen;
        self.gro = self.gross;
        self.quire = 25.0;
        self.quires = self.quire;
        self.ream = 500.0;
        self.reams = self.ream;
        self.percent = 1.0/100.0;
        self.proof = self.percent/2.0;
        self.karat = 1.0/24.0;
        self.karats = self.karat;
        self.mole = 6.0221367e+23;
        self.moles = self.mole;
        self.mol = self.mole;
        self.pi = 3.14159265358979323846*self.radians;

        # SI units (mks)
        # length
        self.meter = Quantity(1,{'m':1});
        self.meters = self.meter;
        self.m = self.meter;
        self.kilometer = 1000.0*self.meters;
        self.kilometers = self.kilometer;
        self.km = self.kilometer;
        self.decimeter = self.meters/10.0;
        self.decimeters = self.decimeter;
        self.dm = self.decimeter;
        self.centimeter = self.meters/100.0;
        self.centimeters = self.centimeter;
        self.cm = self.centimeter;
        self.millimeter = self.meters/1000.0;
        self.millimeters = self.millimeter;
        self.mm = self.millimeter;
        self.micron = self.meter/1000000.0;
        self.microns = self.micron;
        self.um = self.micron;
        self.nanometer = self.meter/1000000000.0;
        self.nanometers = self.nanometer;
        self.nm = self.nanometer;
        self.decinanometer = self.meter/10000000000.0;
        self.decinanometers = self.decinanometer;
        self.Angstrom = self.decinanometer;
        self.Angstroms = self.Angstrom;
        self.Xunit = 1.00202e-13*self.meters;
        self.Xunits = self.Xunit;
        self.Fermi = self.meter/1000000000000000.0;
        self.Fermis = self.Fermi;

        # area
        self.hectare = 10000.0*self.meter*self.meter;
        self.hectares = self.hectare;
        self.ha = self.hectare;

        # volume
        self.stere = self.meter*self.meter*self.meter;
        self.steres = self.stere;
        self.liter = self.stere/1000.0;
        self.liters = self.liter;
        self.l = self.liter;
        self.milliliter = self.stere/1000000.0;
        self.milliliters = self.milliliter;
        self.ml = self.milliliter;
        self.cc = self.milliliter;

        # mass
        self.kilogram = Quantity(1,{'kg':1});
        self.kilograms = self.kilogram;
        self.kg = self.kilogram;
        self.quintal = 100.0*self.kilograms;
        self.quintals = self.quintal;
        self.doppelzentner = self.quintal;
        self.doppelzentners = self.doppelzentner;
        self.gram = self.kilograms/1000.0;
        self.grams = self.gram;
        self.g = self.gram;
        self.milligram = self.kilogram/1000000.0;
        self.milligrams = self.milligram;
        self.mg = self.milligram;

        # time
        self.second = Quantity(1,{'s':1});
        self.seconds = self.second;
        self.sec = self.second;
        self.s = self.second;
        self.millisecond = self.second/1000.0;
        self.milliseconds = self.millisecond;
        self.ms = self.millisecond;
        self.microsecond = self.second/1000000.0;
        self.microseconds = self.microsecond;
        self.us = self.microsecond;
        self.nanosecond = self.second/1000000000.0;
        self.nanoseconds = self.nanosecond;
        self.picosecond = self.second/1000000000000.0;
        self.picoseconds = self.picosecond;
        self.minute = 60.0*self.seconds;
        self.minutes = self.minute;
        self.min = self.minute;
        self.hour = 60.0*self.minutes;
        self.hours = self.hour;
        self.hr = self.hour;
        self.day = 24.0*self.hours;
        self.days = self.day;
        self.da = self.day;
        self.week = 7.0*self.days;
        self.weeks = self.week;
        self.fortnight = 2.0*self.weeks;
        self.fortnights = self.fortnight;
        self.year = 365.2421896698*self.days;
        self.years = self.year;
        self.yr = self.year;
        self.month = self.year/12.0;
        self.months = self.month;
        self.mo = self.month;
        self.decade = 10.0*self.years;
        self.decades = self.decade;
        self.century = 100.0*self.years;
        self.centuries = self.century;
        self.millenium = 1000.0*self.years;
        self.millenia = self.millenium;
        # temporal frequency
        self.Hertz = 1.0/self.second;
        self.Hz = self.Hertz;
        self.kiloHertz = 1000.0*self.Hertz;
        self.kHz = self.kiloHertz;
        self.megaHertz = 1000000.0*self.Hertz;
        self.MHz = self.megaHertz;
        self.gigaHertz = 1000000000.0*self.Hertz;
        self.GHz = self.gigaHertz;
        self.teraHertz = 1000000000000.0*self.Hertz;
        self.THz = self.teraHertz;
        # spacial frequency
        self.diopter = 1.0/self.meter;
        self.diopters = self.diopter;
        # speed
        self.kph = self.kilometers/self.hour;
        # radioactivity
        self.Becquerel = 1.0/self.second;
        self.Becquerels = self.Becquerel;
        self.Bq = self.Becquerel;
        self.Rutherford = 1000000.0*self.Becquerels;
        self.Rutherfords = self.Rutherford;
        self.Curie = 3.7e+10*self.Becquerels;
        self.Curies = self.Curie;
        self.Ci = self.Curie;
        # force
        self.Newton = self.kilogram*self.meter/(self.second*self.second);
        self.Newtons = self.Newton;
        self.N = self.Newton;
        self.dyne = self.Newton/100000.0;
        self.dynes = self.dyne;
        self.dyn = self.dyne;
        # pressure
        self.Pascal = self.Newton/(self.meter*self.meter);
        self.Pascals = self.Pascal;
        self.Pa = self.Pascal;
        self.Barie = self.Pascal/10.0;
        self.Baries = self.Barie;
        self.Barye = self.Barie;
        self.Baryes = self.Barye;
        self.pieze = 1000.0*self.Pascals;
        self.piezes = self.pieze;
        self.pz = self.pieze;
        self.bar = 10000.0*self.Pascals;
        self.bars = self.bar;
        self.Torr = 133.3224*self.Pascals;
        self.atmosphere = 760.0*self.Torr;
        self.atmospheres = self.atmosphere;
        self.atm = self.atmosphere;
        # energy
        self.Joule = self.Newton*self.meter;
        self.Joules = self.Joule;
        self.J = self.Joule;
        self.erg = self.Joule/10000000.0;
        self.ergs = self.erg;
        self.kiloWatthour = 3600000.0*self.Joules;
        self.kiloWatthours = self.kiloWatthour;
        self.kWh = self.kiloWatthour;
        # power
        self.Watt = self.Joule/self.second;
        self.Watts = self.Watt;
        self.W = self.Watt;
        self.kiloWatt = 1000.0*self.Watts;
        self.kiloWatts = self.kiloWatt;
        self.kW = self.kiloWatt;
        self.megaWatt = 1000000.0*self.Watts;
        self.megaWatts = self.megaWatt;
        self.MW = self.megaWatt;
        self.milliWatt = self.Watt/1000.0;
        self.milliWatts = self.milliWatt;
        self.mW = self.milliWatt;
        self.microWatt = self.Watt/1000000.0;
        self.microWatts = self.microWatt;
        self.uW = self.microWatt;
        self.nanoWatt =  self.Watt/1000000000.0;
        self.nanoWatts = self.nanoWatt;
        self.nW = self.nanoWatt;

        # electrical current
        self.Ampere = Quantity(1,{'A':1});
        self.Amperes = self.Ampere;
        self.A = self.Ampere;
        self.Biot = 10.0*self.Amperes;
        self.Biots = self.Biot;
        self.abAmpere = self.Biot
        self.abAmperes = self.abAmpere
        self.abAmp = self.abAmpere
        self.aA = self.abAmpere
        self.statAmpere = self.Biot * 3.335641e-11 # == Biot * (cm/s)/c
        self.statAmperes = self.statAmpere
        self.statAmp = self.statAmpere
        self.statA = self.statAmpere

        # electrical potential
        self.Volt = self.Watt/self.Ampere;
        self.Volts = self.Volt;
        self.V = self.Volt;
        self.statVolt = self.erg / (self.statAmp * self.s)
        self.statVolts = self.statVolt
        self.statV = self.statVolt
        self.abVolt = (self.dyne * self.cm) / (self.abAmp * self.s)

        # electrical resistance
        self.Ohm = self.Volt/self.Ampere;
        self.Ohms = self.Ohm;
        self.statOhm = self.statVolt/self.statAmpere
        self.abOhm = self.abVolt/self.abAmpere

        # electrical conductance
        self.mho = 1.0/self.Ohm;
        self.mhos = self.mho;
        self.Siemens = self.mho;
        self.S = self.Siemens;

        # electrical charge
        self.Coulomb = self.Ampere*self.second;
        self.Coulombs = self.Coulomb;
        self.C = self.Coulomb;
        self.statCoulomb = self.statAmpere * self.second
        self.statCoulombs = self.statCoulomb
        self.statCoul = self.statCoulomb
        self.statC = self.statCoulomb
        self.abCoulomb = self.abAmpere * self.second
        self.abCoulombs = self.abCoulomb
        self.abCoul = self.abCoulomb
        self.Franklin = self.statCoulombs;
        self.Franklins = self.Franklin;

        # electrical capacity
        self.Farad = self.Coulomb/self.Volt;
        self.Farads = self.Farad;
        self.F = self.Farad;

        # magnetic flux
        self.Weber = self.Volt*self.second;
        self.Webers = self.Weber;
        self.Wb = self.Weber;
        self.Maxwell = self.Weber/100000000.0;
        self.Maxwells = self.Maxwell;
        self.M = self.Maxwell;
        self.statMaxwell = self.statVolt * self.second
        self.statMaxwells = self.statMaxwell
        self.statM = self.statMaxwell
        # magnetic field B
        self.Tesla = self.Weber/(self.meter*self.meter);
        self.Teslas = self.Tesla;
        self.T = self.Tesla;
        #self.Gauss = self.Tesla/10000.0;
        self.Gauss = self.abVolt * self.second / self.cm**2
        self.gamma = self.Tesla/1000000000.0;
        # magnetic field H
        self.Oerstedt = 79.57747*self.Ampere/self.meter; # = Gauss/mu0
        self.Oerstedts = self.Oerstedt;
        self.Oe = self.Oerstedt;
        # magnetic inductance
        self.Henry = self.Weber/self.Ampere;
        self.Henrys = self.Henry;
        self.H = self.Henry;
        self.milliHenry = self.Henry/1000.0;
        self.milliHenrys = self.milliHenry;
        self.mH = self.milliHenry;
        # temperature
        self.Kelvin = Quantity(1,{'K':1});
        self.Kelvins = self.Kelvin;
        self.K = self.Kelvin;
        self.milliKelvin = self.Kelvin*1e-3;
        self.mK = self.milliKelvin;
        self.microKelvin = self.Kelvin*1e-6;
        self.uK = self.microKelvin;
        self.nanoKelvin = self.Kelvin*1e-9;
        self.nK = self.nanoKelvin;
        # luminous intensity
        self.candela = Quantity(1,{'candela':1});
        self.candelas = self.candela;
        self.cd = self.candela;
        self.apostilb = self.candelas/self.meter/self.meter;
        self.apostilbs = self.apostilb;
        self.nit = self.apostilb;
        self.nits = self.nit;
        self.skot = self.apostilb/1000.0;
        self.skots = self.skot;
        self.stilb = 10000.0*self.apostilbs;
        self.stilbs = self.stilb;
        self.Blondel = self.apostilb/self.pi;
        self.Blondels = self.Blondel;
        self.Lambert = 10000.0*self.Blondels;
        self.Lamberts = self.Lambert;
        # light flux
        self.lumen = self.candela*self.steradian;
        self.lumens = self.lumen;
        self.lm = self.lumen;
        # light intensity
        self.lux = self.lumens/self.meter/self.meter;
        self.luxes = self.lux;
        self.luces = self.lux;
        self.lx = self.lux;
        self.nox = self.lux/1000.0;
        self.phot = self.lumens/self.centimeter/self.centimeter;
        self.phots = self.phot;

        # acceleration
        self.Galileo = self.centimeters/self.second/self.second;
        self.Galileos = self.Galileo;
        # standard gravitational acceleration at sea level
        self.gravity = 9.80665*self.meters/self.second/self.second;
        # mass
        self.kilohyl = self.kilogram*self.gravity*self.second*self.second/self.meter;
        self.kilohyls = self.kilohyl;
        self.hyl = self.kilohyl/1000.0;
        self.hyls = self.hyl;

        # English Units
        # length
        self.inch = 0.0254*self.meters;
        self.inches = self.inch;
        #self.in = self.inch;
        self.mil = self.inch/1000.0;
        self.mils = self.mil;
        self.point = self.inch/72.27;
        self.points = self.point;
        self.pt = self.point;
        self.bottommeasure = self.inch/40.0;
        self.bottommeasures = self.bottommeasure;
        self.line = self.inch/12.0;
        self.lines = self.line;
        self.pica = 12.0*self.points;
        self.picas = self.pica;
        self.barleycorn = self.inch/3.0;
        self.barleycorns = self.barleycorn;
        self.finger = 7.0*self.inches/8.0;
        self.fingers = self.finger;
        self.palm = 3.0*self.inches;
        self.palms = self.palm;
        self.hand = 4.0*self.inches;
        self.hands = self.hand;
        self.link = 7.92*self.inches;
        self.links = self.link;
        self.li = self.link;
        self.span = 9.0*self.inches;
        self.spans = self.span;
        self.foot = 12.0*self.inches;
        self.feet = self.foot;
        self.ft = self.foot;
        self.cubit = 18.0*self.inches;
        self.cubits = self.cubit;
        self.yard = 3.0*self.feet;
        self.yards = self.yard;
        self.yd = self.yard;
        self.nail = self.yard/16.0;
        self.nails = self.nail;
        self.ell = 45.0*self.inches;
        self.ells = self.ell;
        self.pace = 5.0*self.feet;
        self.paces = self.pace;
        self.fathom = 6.0*self.feet;
        self.fathoms = self.fathom;
        self.fm = self.fathom;
        self.rod = 198.0*self.inches;
        self.rods = self.rod;
        self.rd = self.rod;
        self.pole = self.rod;
        self.poles = self.pole;
        self.p = self.pole;
        self.perch = self.rod;
        self.perches = self.perch;
        self.rope = 20.0*self.feet;
        self.ropes = self.rope;
        self.bolt = 40.0*self.yards;
        self.bolts = self.bolt;
        self.chain = 4.0*self.rods;
        self.chains = self.chain;
        self.ch = self.chain;
        self.skein = 120*self.yards;
        self.skeins = self.skein;
        self.furlong = 220*self.yards;
        self.furlongs = self.furlong;
        self.spindle = 14400*self.yards;
        self.spindles = self.spindle;

        self.statute      = statute.statute(prefix + 'unit.', self);

        self.parasang = 3.5*self.statute.miles;
        self.parasangs = self.parasang;
        self.arpentcan = 27.52*self.statute.miles;
        self.arpentcans = self.arpentcan;
        self.arpentlin = 191.835*self.feet;
        self.arpentlins = self.arpentlin;
        self.astronomical_unit = 1.49597871e11*self.meters;
        self.astronomical_units = self.astronomical_unit;
        self.AU = self.astronomical_unit;
        self.lightyear = 9.4605e15*self.meters;
        self.lightyears = self.lightyear;
        self.ly = self.lightyear;

        self.arc          = arc.arc(prefix + 'unit.', self);

        self.parsec = self.AU*self.radians/self.arc.second;
        self.parsecs = self.parsec;
        self.pc = self.parsec;
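        # (a parsec is the distance at which 1 AU subtends one second of
        #  arc, hence AU*radian/arcsecond)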
        # area
        self.barn = 1.0e-28*self.meter*self.meter;
        self.barns = self.barn;
        self.b = self.barn;
        self.circular_inch = 0.25*self.pi*self.inch*self.inch;
        self.circular_inches = self.circular_inch;
        self.circular_mil = 0.25*self.pi*self.mil*self.mil;
        self.circular_mils = self.circular_mil;
        self.sabin = self.foot*self.foot;
        self.sabins = self.sabin;
        self.square = 100.0*self.sabin;
        self.squares = self.square;
        self.are = 100.0*self.meter*self.meter;
        self.ares = self.are;
        self.a = self.are;
        self.rood = 40.0*self.rod*self.rod;
        self.roods = self.rood;
        self.ro = self.rood;
        self.acre = 4.0*self.roods;
        self.acres = self.acre;
        self.section = self.statute.mile*self.statute.mile;
        self.sections = self.section;
        self.homestead = self.section/4.0;
        self.homesteads = self.homestead;
        self.township = 36.0*self.sections;
        self.townships = self.township;

        # mass
        self.grain = 0.06479891*self.grams;
        self.grains = self.grain;
        self.gr = self.grain;
        self.pennyweight = 24.0*self.grains;
        self.dwt = self.pennyweight;

        # volume
        self.minim = 6.161152e-8*(self.m*self.m*self.m);
        self.minims = self.minim;
        self.drop = 0.03*self.cc;
        self.drops = self.drop;
        self.teaspoon = 4.928922*self.cc;
        self.teaspoons = self.teaspoon;
        self.tablespoon = 3.0*self.teaspoons;
        self.tablespoons = self.tablespoon;

        self.avoirdupois  = avoirdupois.avoirdupois(prefix + 'unit.', self);
        self.avdp         = self.avoirdupois
        self.av           = self.avoirdupois
        self.US           = US.US(prefix + 'unit.', self);

        self.noggin = 2.0*self.US.liquid.ounces;
        self.noggins = self.noggin;
        self.cup = 8.0*self.US.liquid.ounces;
        self.cups = self.cup;
        self.fifth = self.US.liquid.gallon/5.0;
        self.fifths = self.fifth;
        self.jeroboam = 4.0*self.fifths;
        self.jeroboams = self.jeroboam;
        self.firkin = 9.0*self.US.liquid.gallons;
        self.firkins = self.firkin;
        self.kilderkin = 18.0*self.US.liquid.gallons;
        self.kilderkins = self.kilderkin;
        self.strike = 2.0*self.US.bushels;
        self.strikes = self.strike;
        self.sack = 3.0*self.US.bushels;
        self.sacks = self.sack;
        self.coomb = 4.0*self.US.bushels;
        self.coombs = self.coomb;
        self.seam = 8.0*self.US.bushels;
        self.seams = self.seam;
        self.wey = 40.0*self.US.bushels;
        self.weys = self.wey;
        self.last = 80.0*self.US.bushels;
        self.lasts = self.last;
        self.register_ton = 100.0*(self.ft*self.ft*self.ft);
        self.register_tons = self.register_ton;
        self.register_tn = self.register_ton;
        self.cord = 128.0*(self.ft*self.ft*self.ft);
        self.cords = self.cord;
        self.cordfoot = self.cord/8.0; # a cord foot = 4 ft x 4 ft x 1 ft = 16 cubic feet
        self.cordfeet = self.cordfoot;
        self.boardfoot = 144.0*self.inch*self.inch*self.inch;
        self.boardfeet = self.boardfoot;
        self.timberfoot = self.foot*self.foot*self.foot;
        self.timberfeet = self.timberfoot;
        self.hogshead = 2.0*self.US.barrels;
        self.hogsheads = self.hogshead;
        self.pipe = 4.0*self.US.barrels;
        self.pipes = self.pipe;
        self.tun = 8.0*self.US.barrels;
        self.tuns = self.tun;


        self.stone = 14.0*self.avoirdupois.pounds;
        self.stones = self.stone;
        self.st = self.stone;


        self.crith = 0.0906*self.grams;
        self.criths = self.crith;
        self.bag = 94.0*self.avoirdupois.pounds;
        self.bags = self.bag;
        self.cental = 100.0*self.avoirdupois.pounds;
        self.centals = self.cental;
        self.weymass = 252.0*self.avoirdupois.pounds;
        # rate
        self.mgd = 1000000.0*self.US.liquid.gallons/self.day;
        self.cfs = self.foot*self.foot*self.foot/self.second;
        self.minersinch = 1.5*self.foot*self.foot*self.foot/self.minute;
        self.mpg = self.statute.miles/self.US.liquid.gallon;

        self.nautical     = nautical.nautical(prefix + 'unit.', self);

        # speed
        self.mph = self.statute.miles/self.hour;
        self.knot = self.nautical.miles/self.hour;
        self.knots = self.knot;

        # force
        self.poundal = self.avdp.pound*self.foot/(self.second*self.second);
        self.poundals = self.poundal;
        self.pdl = self.poundal;
        self.lbf = self.avoirdupois.pound*self.gravity;
        # pressure
        self.psi = self.lbf/(self.inch*self.inch);
        # energy
        self.calorie = 4.1868*self.Joules;
        self.calories = self.calorie;
        self.cal = self.calorie;
        self.kilocalorie = 1000.0*self.calories;
        self.kilocalories = self.kilocalorie;
        self.kcal = self.kilocalorie;
        self.Frigorie = self.kilocalorie;
        self.Frigories = self.Frigorie;
        self.Btu = 1055.06*self.Joules;
        self.therm = 100000.0*self.Btu; # US therm = 100,000 Btu
        self.therms = self.therm;
        self.thermie = 1000000.0*self.calories;
        self.thermies = self.thermie;
        # power
        self.horsepower = 735.49875*self.Watts; # metric horsepower
        self.HP = self.horsepower;
        # magnetomotive force
        self.Gilbert = 0.795775*self.Amperes;
        self.Gilberts = self.Gilbert;
        # temperature
        self.Rankin = self.Kelvins/1.8; # 1 Rankine degree = 5/9 Kelvin
        self.Rankins = self.Rankin;
        self.R = self.Rankin;
        # luminous intensity
        self.candle = 1.02*self.candelas;
        self.candles = self.candle;
        # light intensity
        self.foot_candle = self.lumens/self.foot/self.foot;
        self.foot_candles = self.foot_candle;
        self.fc = self.foot_candle;
        self.foot_Lambert = self.candelas/self.foot/self.foot/self.pi;
        self.foot_Lamberts = self.foot_Lambert;



        # and now load all of the sub-modules that don't have
        # interdependencies with other unit stuff.
        self.admiralty    = admiralty.admiralty(prefix + 'unit.', self);
        self.apothecary   = apothecary.apothecary(prefix + 'unit.', self);
        self.bakers       = bakers.bakers(prefix + 'unit.', self);
        self.British      = British.British(prefix + 'unit.', self);
        self.displacement = displacement.displacement(prefix + 'unit.', self);
        self.dose         = dose.dose(prefix + 'unit.', self);
        self.engineers    = engineers.engineers(prefix + 'unit.', self);
        self.equivalent   = equivalent.equivalent(prefix + 'unit.', self);
        self.geodetic     = geodetic.geodetic(prefix + 'unit.', self);
        self.geographical = geographical.geographical(prefix + 'unit.', self);
        self.Gunters      = Gunters.Gunters(prefix + 'unit.', self);
        self.Hefner       = Hefner.Hefner(prefix + 'unit.', self);
        self.metric       = metric.metric(prefix + 'unit.', self);

        # #### END OF UNIT CREATION #### #
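        # A minimal usage sketch (hypothetical instance name `unit`, and
        # assuming Quantity arithmetic reduces dimensionless ratios):
        #     unit.acre/(unit.yard*unit.yard)  -> 4840.0
        #     60.0*unit.mph/unit.knot          -> ~52.14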

        # do some trickery to get modules set to instantiated classes
        # We expect each submodule to do likewise for those not already taken
        # care of here.
        self.admiralty.__name__ = admiralty.__name__
        sys.modules[admiralty.__name__] = self.admiralty

        self.apothecary.__name__ = apothecary.__name__
        sys.modules[apothecary.__name__] = self.apothecary
        sys.modules[ap.__name__] = self.apothecary
        sys.modules[troy.__name__] = self.apothecary
        sys.modules[t.__name__] = self.apothecary

        self.arc.__name__ = arc.__name__
        sys.modules[arc.__name__] = self.arc

        self.avoirdupois.__name__ = avoirdupois.__name__
        sys.modules[avoirdupois.__name__] = self.avoirdupois
        # fix dummies for avoirdupois
        sys.modules[avdp.__name__] = self.avoirdupois
        sys.modules[av.__name__] = self.avoirdupois

        self.bakers.__name__ = bakers.__name__
        sys.modules[bakers.__name__] = self.bakers

        self.British.__name__ = British.__name__
        sys.modules[British.__name__] = self.British
        sys.modules[English.__name__] = self.British
        sys.modules[Imperial.__name__] = self.British

        self.displacement.__name__ = displacement.__name__
        sys.modules[displacement.__name__] = self.displacement

        self.dose.__name__ = dose.__name__
        sys.modules[dose.__name__] = self.dose

        self.engineers.__name__ = engineers.__name__
        sys.modules[engineers.__name__] = self.engineers

        self.equivalent.__name__ = equivalent.__name__
        sys.modules[equivalent.__name__] = self.equivalent

        self.geodetic.__name__ = geodetic.__name__
        sys.modules[geodetic.__name__] = self.geodetic

        self.geographical.__name__ = geographical.__name__
        sys.modules[geographical.__name__] = self.geographical

        self.Gunters.__name__ = Gunters.__name__
        sys.modules[Gunters.__name__] = self.Gunters

        self.Hefner.__name__ = Hefner.__name__
        sys.modules[Hefner.__name__] = self.Hefner

        self.metric.__name__ = metric.__name__
        sys.modules[metric.__name__] = self.metric

        self.nautical.__name__ = nautical.__name__
        sys.modules[nautical.__name__] = self.nautical
        sys.modules[marine.__name__] = self.nautical

        self.statute.__name__ = statute.__name__
        sys.modules[statute.__name__] = self.statute

        self.US.__name__ = US.__name__
        sys.modules[US.__name__] = self.US
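        # The swaps above mean that a later `import <prefix>unit.metric`
        # (say) returns the instantiated object from sys.modules rather
        # than the raw module, so unit attributes resolve directly on import.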
Example #20
0
def diameter2(pipes):
    """
    Inference on the diameter of all nan values in the pipes dataframe.
    
    Inputs: pipes - Pandas dataframe (nxn Dataframe),
    Outputs: diameter inference with distances (2xn np.array)
    """
    # Separate known from unknown diameters
    unknown = pipes[pipes.diameter.isnull()].copy()
    unknown.reset_index(inplace=True)
    known = pipes[pipes.diameter.notnull()]

    # Accumulators: (diameter, distance) guesses and majority-vote checks
    dia_dists = []
    check = []

    # Iterate through the pipes with nans
    for i in unknown.index:
        unk_data = unknown.iloc[i]

        # Find subset of pipes to consider
        domain = known[((known.startx > unk_data['startx'] - 1000) &
                        (known.startx < unk_data['startx'] + 1000) &
                        (known.starty > unk_data['starty'] - 1000) &
                        (known.starty < unk_data['starty'] + 1000)) |
                       ((known.endx > unk_data['endx'] - 1000) &
                        (known.endx < unk_data['endx'] + 1000) &
                        (known.endy > unk_data['endy'] - 1000) &
                        (known.endy < unk_data['endy'] + 1000)) |
                       ((known.startx > unk_data['endx'] - 1000) &
                        (known.startx < unk_data['endx'] + 1000) &
                        (known.starty > unk_data['endy'] - 1000) &
                        (known.starty < unk_data['endy'] + 1000)) |
                       ((known.endx > unk_data['startx'] - 1000) &
                        (known.endx < unk_data['startx'] + 1000) &
                        (known.endy > unk_data['starty'] - 1000) &
                        (known.endy < unk_data['starty'] + 1000))]

        # Initialize distance and diameter
        dist = 100000
        diam_guess = 0
        diams = []
        dists = []

        # Iterate through the subset of pipes
        for j in domain.reset_index().index:
            # Find the metric distance between the pipe and selected pipe
            met_dist = metric.metric(unk_data, domain.iloc[j], domain)

            # Add a fixed penalty when material or install year differ
            if unk_data['material'] != domain.iloc[j]['material']:
                met_dist += 18
            if 'installyear' in pipes.columns:
                if unk_data['installyear'] != domain.iloc[j]['installyear']:
                    met_dist += 18

            # Check against previous distance
            if met_dist < dist:
                dist = met_dist
                diam_guess = domain.iloc[j]['diameter']
            diams.append(domain.iloc[j]['diameter'])
            dists.append(met_dist)

        # Closest diameters
        close = np.argsort(dists)
        voters = np.array(diams)[close][:5]
        best_diam = Counter(voters).most_common()[0][0]

        check.append(best_diam)

        dia_dists.append((diam_guess, dist))

    # Return nearest-neighbour guesses and majority-vote diameters
    return np.array(dia_dists), np.array(check)
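# Hedged usage sketch (column names inferred from the function body):
#   pipes needs startx, starty, endx, endy, material and diameter columns;
#   guesses, votes = diameter2(pipes)
#   pipes.loc[pipes.diameter.isnull(), 'diameter'] = votes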
Example #21
0
def main(arg):

    alternating, pre_training_batches, combined_cost_function, iteration, batch_size, run_id = arg

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    #sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    sess = tf.Session()
    input_size = mnist.train.images.shape[1]

    x = tf.placeholder(tf.float32, [None, input_size])
    y = tf.placeholder(tf.float32,[None,10])
    sizes = [700,600,500,400,300,200,100,50]

    autoencoder = create(x,y,sizes)
    init = tf.global_variables_initializer()
    sess.run(init)
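    # Three optimizers over the same weights: combined (classification +
    # reconstruction) cost, classification cost alone, and autoencoder
    # reconstruction cost alone; the flags below select which ones run.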
    dual_train_step  = tf.train.GradientDescentOptimizer(0.5).minimize(autoencoder['cost_total'])
    class_train_step = tf.train.GradientDescentOptimizer(0.5).minimize(autoencoder['cost_class'])
    auto_train_step  = tf.train.GradientDescentOptimizer(0.5).minimize(autoencoder['cost_autoencoder'])

    c1_axis = np.zeros(0)
    c2_axis = np.zeros(0)
    c3_axis = np.zeros(0)
    x_axis = np.zeros(0)

    if pre_training_batches > 0:
        """
        PRETRAIN
        """
        print('pre-train autoencoder:')
        for i in tqdm(range(pre_training_batches)):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(auto_train_step, feed_dict={x: batch_xs, y: batch_ys})


    # run the main training loop for `iteration` steps
    # print('i\ttot\tclass\tauto')
    for i in tqdm(range(iteration)):
        # Train classifier
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)

        if combined_cost_function:
            sess.run(dual_train_step, feed_dict={x: batch_xs, y: batch_ys})
        else:
            sess.run(class_train_step, feed_dict={x: batch_xs, y: batch_ys})


        if alternating:
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(auto_train_step, feed_dict={x: batch_xs, y: batch_ys})

        if i % 100 == 0:
            batch_xs, batch_ys = mnist.validation.next_batch(batch_size)
            c1 = sess.run(autoencoder['cost_total'], feed_dict={x: batch_xs, y: batch_ys})
            c2 = sess.run(autoencoder['cost_class'], feed_dict={x: batch_xs, y: batch_ys})
            c3 = sess.run(autoencoder['cost_autoencoder'], feed_dict={x: batch_xs, y: batch_ys})
            # print 'wtf', c
            x_axis = np.append(x_axis,i)
            c1_axis = np.append(c1_axis,c1)
            c2_axis = np.append(c2_axis,c2)
            c3_axis = np.append(c3_axis,c3)

            # print i,
            # print c,
            # print sess.run(autoencoder['cost_class'], feed_dict={x: batch_xs, y: batch_ys}),
            # print sess.run(autoencoder['cost_autoencoder'], feed_dict={x: batch_xs, y: batch_ys})

            # print i, " original", batch[0]
            # print i, " decoded", sess.run(autoencoder['decoded'], feed_dict={x: batch})

    # compare(sess,mnist,2)
    compareall(sess,mnist,autoencoder,x,save=True,show=False,run_id=run_id)
    fig = plt.figure()
    plt.plot(x_axis,c1_axis,label='cost_total')
    plt.plot(x_axis,c2_axis,label='cost_class')
    plt.plot(x_axis,c3_axis,label='cost_autoencoder')
    # plt.show()
    plt.legend()
    plt.savefig('graph' + str(run_id))
    np.savetxt('costs'+str(run_id)+'.dat',np.array([x_axis,c1_axis,c2_axis,c3_axis]))
    metric.metric(autoencoder,sess,y,mnist,x,'log'+str(run_id)+'.txt')
    sess.close()
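# Hypothetical invocation (argument order from the unpacking at the top):
#   main((True, 500, False, 2000, 100, 0))
#   i.e. alternating updates, 500 pre-training batches, separate class
#   cost, 2000 training iterations, batch size 100, run_id 0.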
Example #22
0
            n_trg_vocab = train_iter.dataset.trg_lang.size
            print("# of source vocabulary: %d"%n_src_vocab)
            print("# of target vocabulary: %d"%n_trg_vocab)

        args.n_src_vocab = n_src_vocab
        args.n_trg_vocab = n_trg_vocab

        model = load_model(args)
        model.cuda()
        
        # load the model weights
        best_ckpt_path = model_path
        _, model, _, _ = load_ckpt(best_ckpt_path, False)
        results = batch_evaluate(model, test_iter,
                                 train_iter.dataset.src_lang,
                                 train_iter.dataset.trg_lang,
                                 device=args.device, beam_size=5,
                                 verbose=True, sample_file='samples/sample.txt')
        print("computing BLEU score...")
        bleu_score = metric(results, metric_type="bleu")
        print("BLEU: %f"%bleu_score)
    elif args.mode == 'eval':
        # evaluate the samples.txt
        # load file
        with open('samples/sample.txt') as f:
            tgt, ref = [], []
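            # sample.txt is assumed to cycle through 4-line records
            # (source, '-trg:' reference, '-hyp:' hypothesis, separator);
            # hence the idx % 4 dispatch below.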
            for idx, i in tqdm(enumerate(f.readlines())):
                i = i.replace('<user0>', '').replace('<user1>', '').replace('-trg:', '').replace('-hyp:', '').strip()
                if idx % 4 == 1:
                    ref.append(i.split())
                elif idx % 4 == 2:
                    tgt.append(i.split())
        assert len(ref) == len(tgt), 'something is wrong with the sample.txt file'
        
        # performance