Example #1
    def __init__(self, classifier, training_data, attributes):
        self.classifier = classifier
        self.temp_data = training_data
        self.attributes = attributes
        self.masterList = []

        N = len(self.temp_data)
        # number of resampling rounds; adjust as needed
        for _ in range(50):
            # draw ~80% of the indices (with replacement) for this round's sample
            rand = np.random.randint(0, N, N * 4 // 5)
            training_data = dataset.DataSet()
            testing_data = dataset.DataSet()
            for i in range(N):
                if i in rand:
                    training_data.append(self.temp_data.all_examples[i])
                else:
                    # held-out remainder (presumably the intended test split)
                    testing_data.append(self.temp_data.all_examples[i])

            self.masterList.append(
                self.iteration(training_data, self.attributes))
            self.dump()

Example #2
def __test_datasets():
    for dset, cat in [('std_gr-qc', 'import'), ('eg1k_rnd_std','gr-qc'), ('eg1k_chr_prc','gr-qc'),
            ('eg1k_chr_prc', 'math.FA'), ('eg1k_rnd_std', 'gr-qc'), ('eg1k_chr_frm', 'math.GN'),
            ('eg1k_chr_prc', 'math.GN'), ('eg1k_rnd_kcv', 'math.GN'), ('eg1k_chr_10prc', 'math.FA'),
            ('eg1k_chr_5prc', 'math.FA'), ('eg1k_rnd_std','math.FA')]:
        print("{}> {}".format(cat,dset))
        data = dataset.DataSet('../datasets/', cat, dset)
        test_ed = data.get_test_edges()
        print(" Rozmiar:", data.vx_count, "({}/{})".format(data.train_size,data.test_size))
        matrix = data.get_training_set(mode='adjacency_matrix_csc')
        graph = merw.matrix_to_graph(matrix)
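        # graph_stats (defined elsewhere) summarizes the training graph against
        # per-vertex degrees derived from the held-out test edges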

        graph_stats(graph, edges_to_deg(test_ed, data.vx_count), len(test_ed))
        print()
Example #3
def drawScatter():
    ds=dataset.DataSet()
    ds.getTrainData()
    mouses=ds.train["mouses"]
    goals=ds.train["goals"]
    dw=datadraw.DataDraw("2d")
    mouses_start=ds.getPosOfMouse(1)
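    # indices below 2600 appear to belong to one class and the rest to the other;
    # the 'y' and 'b' colors mark the two groups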
    dw.drawbatchgoal(mouses_start[:2600],'y')
    dw.drawbatchgoal(mouses_start[2600:],'b')

    dw.drawbatchgoal(goals[:2600],'y')
    dw.drawbatchgoal(goals[2600:],'b')
    
    plt.show()
Example #4
def test_model():
    '''
    Evaluate the trained model on the test dataset.
    :return: test results
    '''
    data = dataset.DataSet('../data/dataset', CLASS_LIST, True)
    test_x, test_y = data.get_test_data()

    tf.reset_default_graph()
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 1200])
        y = tf.placeholder(tf.float32, [None, CLASS_NUM])
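    # fall_net (defined elsewhere) is assumed to return the logits and a dropout
    # keep_prob placeholder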
    y_, keep_prob = fall_net(x)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)

    start_time = time.time()

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "../model/model.ckpt")
        p_y = np.argmax(sess.run(y_, feed_dict={x: test_x, keep_prob: 1.0}), 1)
        print("准确率为 %f" % accuracy.eval(feed_dict={
            x: test_x,
            y: test_y,
            keep_prob: 1.0
        }))

    test_time = str(time.time() - start_time)
    print('Test time:', test_time)

    g_truth = np.argmax(test_y, 1)
    avg_sensitivity = 0
    avg_specificity = 0

    for i in range(CLASS_NUM):
        # use a local name so the accuracy tensor above is not shadowed
        acc, sensitivity, specificity = evaluate(p_y, g_truth, i)
        print('class:%10s, accuracy=%.5f, sensitivity=%.5f, specificity=%.5f' %
              (Label[CLASS_LIST[i]], acc, sensitivity, specificity))
        avg_sensitivity += sensitivity
        avg_specificity += specificity

    avg_sensitivity = avg_sensitivity / CLASS_NUM
    avg_specificity = avg_specificity / CLASS_NUM

    print('avg_sensitivity=%.5f, avg_specificity=%.5f' %
          (avg_sensitivity, avg_specificity))
Example #5
def run_train():

    test_config = lstmmodel.ModelConfig()
    #test_config.batch_size = 1
    test_config.batch_size = lstmmodel.TEST_DATA_SIZE

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True
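    # soft placement lets TF fall back to another device if /gpu:1 is unavailable;
    # allow_growth grows GPU memory on demand instead of grabbing it all up front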

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:
        with tf.device('/gpu:1'):
            initializer = tf.random_uniform_initializer(
                -test_config.init_scale, test_config.init_scale)
            with tf.variable_scope("model",
                                   reuse=None,
                                   initializer=initializer):
                model_test = lstmmodel.LSTMModel(test_config,
                                                 'test',
                                                 is_training=False)

            data_test = dataset.DataSet(FLAGS.file_path_test,
                                        FLAGS.data_root_dir,
                                        lstmmodel.TEST_DATA_SIZE,
                                        test_config.num_steps,
                                        test_config.feature_size,
                                        is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')

            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver, FLAGS.saveModelPath)
            print('start: ', last_epoch + 1)

            test_accuracy_1, test_accuracy_5, test_loss = run_epoch(
                sess,
                model_test,
                data_test,
                tf.no_op(),
                2,
                test_writer,
                istraining=False)
            print(
                "Test accuracy (top 1): %.3f Test accuracy (top 5): %.3f Loss %.3f" %
                (test_accuracy_1, test_accuracy_5, test_loss))

            test_writer.close()

            print("Training step is compeleted!")
Example #6
def main():
    model_names = [
        '2018-05-04--22-57-49',
        '2018-05-04--23-03-46',
        '2018-05-07--17-22-10',
        '2018-05-08--23-37-07',
        '2018-05-11--00-10-54'
    ]

    # computing ROC on the first Network.BATCH_SIZE samples from the randomized dataset
    # Network.BATCH_SIZE = len(images)
    Network.BATCH_SIZE = 30
    data_set = dataset.DataSet(Network.BATCH_SIZE)

    images, voxelmaps, _ = data_set.csv_inputs_voxels(Network.TRAIN_FILE)
    images_test, voxelmaps_test, _ = data_set.csv_inputs_voxels(Network.TEST_FILE)

    config = tf.ConfigProto(
        device_count={'GPU': 0}
    )
    with tf.Session(config=config) as sess:
        batch_rgb, batch_voxels = sess.run(
            [images, voxelmaps])
        batch_rgb_test, batch_voxels_test = sess.run(
            [images_test, voxelmaps_test])
    print('evaluation dataset loaded')
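    # persist the ground-truth voxel batches so the ROC computation can be redone offline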

    with open('evaluate/roc-dump-gt.rick', 'wb+') as f:
        pickle.dump(batch_voxels, f)
    with open('evaluate/roc-dump-gt-test.rick', 'wb+') as f:
        pickle.dump(batch_voxels_test, f)

    results = predict_voxels(batch_rgb, batch_voxels, model_names)
    with open('evaluate/roc-dump-train.rick', 'wb+') as f:
        pickle.dump(results, f)
    # for model_name, res in results.items():
    #     pred_voxels, fn_val, tn_val, tp_val, fp_val = res
    #     calc_and_persist_roc(pred_voxels, batch_voxels, model_name, 'train')
    #     with open('evaluate/rates-{}-{}.rick'.format(model_name, 'train'), 'wb+') as f:
    #         pickle.dump((fn_val, tn_val, tp_val, fp_val), f)

    results_test = predict_voxels(batch_rgb_test, batch_voxels_test, model_names)
    with open('evaluate/roc-dump-test.rick', 'wb+') as f:
        pickle.dump(results_test, f)
    for model_name, res in results_test.items():
        pred_voxels, fn_val, tn_val, tp_val, fp_val = res
        calc_and_persist_roc(pred_voxels, batch_voxels_test, model_name, 'test')
        with open('evaluate/rates-{}-{}.rick'.format(model_name, 'test'), 'wb+') as f:
            pickle.dump((fn_val, tn_val, tp_val, fp_val), f)
Example #7
def main():
    train_set = dataset.DataSet(pathlib.Path("/home/dominik/workspace/duckietown_imitation_learning/train_images"))
    test_set = dataset.DataSet(pathlib.Path("/home/dominik/workspace/duckietown_imitation_learning/test_images"))

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=200, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=100, shuffle=False)

    device = "cuda:0"

    net = Net()
    net.apply(weights_init)
    net.to(device)

    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.85, weight_decay=0.0005)

    running_loss = 0

    for epoch in range(100):
        for i, (lbls, imgs) in enumerate(train_loader):
            optimizer.zero_grad()

            lbls = lbls.to(device)
            imgs = imgs.to(device)

            outputs = net(imgs)

            loss = criterion(outputs, lbls)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            # if i % 10 == 9:
            print(loss.item())
            validation(net, test_loader)
Example #8
def build_classify_trainer(tokenizer,
                           model,
                           processor,
                           train_config_file,
                           data_dir,
                           max_seq_length,
                           return_dataset=False):
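    # builds a transformers-style Trainer over train/eval splits produced by dataset.DataSet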
    training_args = args_parser._get_training_args(train_config_file)
    trainset = dataset.DataSet(processor, tokenizer, data_dir, max_seq_length,
                               "train")
    evalset = dataset.DataSet(processor, tokenizer, data_dir, max_seq_length,
                              "eval")
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=trainset,
        eval_dataset=evalset,
        compute_metrics=build_metrics.build_compute_classify_metrics_fn(),
    )

    if return_dataset:
        return trainer, training_args, trainset, evalset
    else:
        return trainer, training_args
Example #9
def main():
    ds=dataset.DataSet()
    ds.getTrainData()
    mouses=ds.train["mouses"]
    goals=ds.train["goals"]
    labels=ds.train["labels"]
    n=ds.train["size"]
    vector=[]
    config["borders"]=get_borders(mouses)
    # print config
    # exit()
    for i in range(n):
        vector.append(getfeature(1,mouses[i],goals[i],1)[0])
    vector=np.array(vector)

    scaler_vector=vector
    vector = preprocessing.scale(vector)
    vector = np.c_[scaler_vector[:,0],vector[:,1:]]
    printdemo(vector)
    # print len(vector[0])
    # exit()
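    # reduce the (scaled) feature matrix to its first principal component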
    pca = PCA(n_components=1)
    pca.fit(vector)
    vector=pca.transform(vector)

    dt = datadeal.DataTrain()
    # about 170,000 ("17 w")
    clf = MLPClassifier(alpha=0.9,
                        activation='logistic',
                        hidden_layer_sizes=(15, 19),
                        random_state=0,
                        solver='lbfgs',
                        max_iter=250,
                        early_stopping=True,
                        epsilon=1e-04,
                        # learning_rate_init=0.1, learning_rate='invscaling',
                        )

    print(clf)
    # clf = MLPClassifier(alpha=1e-4,
    #     activation='logistic', \
    #     hidden_layer_sizes=(16,18),random_state=0,solver='lbfgs',\
    #     max_iter=400)

    # False
    test = True
    if test:
        dt.trainTest(clf, vector, labels, 4.0)
    else:
        scaler = preprocessing.StandardScaler().fit(scaler_vector)
        dt.train(clf, vector, labels)
        dt.testResultAll(ds, getfeature, savepath='./data/0706tmp.txt', stop=1200, scal=scaler, pca=pca)
Example #10
def draw2d():
    ds=dataset.DataSet()
    ds.getTrainData()
    mouses=ds.train["mouses"]
    goals=ds.train["goals"]
    dw=datadraw.DataDraw("2d")

    START=2700
    PAIRS=2
    colors=['b','r','g','y','c','k','m']
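    # draw PAIRS trajectories from the head of the data and PAIRS starting at
    # index 2700 (apparently the other class) for visual comparison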
    for i in range(PAIRS):
        dw.drawline(mouses[i])
        dw.drawline(mouses[START+i])
        # dw.drawgoal([goals[i][0],goals[i][1],i],c=colors[i%7])
        # dw.drawgoal([goals[START+i][0],goals[START+i][1]],c=colors[(i+3)%7])
    plt.show()
Example #11
    def __init__(self, qp=None, sgfdir='sgf', net=None):
        self.net = net if net else None
        self.board = board.Board()  # [go.Position(to_move=go.WHITE) for i in range(1)]
        self.board.player1_name = 'ZiGo_New'
        self.board.player2_name = 'ZiGo_Old'
        self.qp = qp
        if self.qp:
            self.qp.start(self.board)
        self.sgfdir = sgfdir
        self.datas = dataset.DataSet()
        # neural net 1 always plays "black", and variety is accomplished by
        # letting white play first half the time.
        self.running = False
Example #12
def testResultAll():
    ds=dataset.DataSet()
    allnum=0
    machine=0
    machine_list=[]
    while True:
        idx, mouse, goal, label = ds.readTestFile()
        if idx is False:
            break
        if dealmouse(mouse) > 0:
            machine += 1  # running count of machine-labeled samples
            machine_list.append(idx)

        if allnum % 1000 == 0:
            print(idx, machine)
        allnum += 1
    print(len(machine_list))
Example #13
def main():
    ds = dataset.DataSet()
    ds.getTrainData()
    mouses = ds.train["mouses"]
    goals = ds.train["goals"]
    labels = ds.train["labels"]
    n = ds.train["size"]
    config["borders"] = get_borders(mouses)
    machine_list = []
    for i in range(n):
        mouse = mouses[i]
        f_label_a = get_spoint_filter(mouse, config)[0]
        f_label_b = get_X_PN(mouse)[0]
        print(f_label_a, f_label_b, i)
        if f_label_a == 0 and f_label_b == 0:
            machine_list.append(i)
    print(len(machine_list))
Example #14
def main():
    print("loading data...")
    ds = dataset.DataSet(classes=classes)
    train_X, train_y = ds.load_data(
        'train')  # need to implement, y shape is (None, 360, 480, classes)

    train_X = ds.preprocess_inputs(train_X)
    train_Y = ds.reshape_labels(train_y)
    print("input data shape...", train_X.shape)
    print("input label shape...", train_Y.shape)

    test_X, test_y = ds.load_data(
        'test')  # need to implement, y shape is (None, 360, 480, classes)
    test_X = ds.preprocess_inputs(test_X)
    test_Y = ds.reshape_labels(test_y)

    tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath,
                                        histogram_freq=1,
                                        write_graph=True,
                                        write_images=True)
    fpath = 'weights.{epoch:02d}.hdf5'
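    # checkpoint the full model to fpath every 3 epochs (save_best_only=False keeps all of them)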
    mc_cb = keras.callbacks.ModelCheckpoint(fpath,
                                            monitor='val_loss',
                                            verbose=0,
                                            save_best_only=False,
                                            save_weights_only=False,
                                            mode='auto',
                                            period=3)

    print("creating model...")
    model = SegNet(input_shape=input_shape, classes=classes)
    model.compile(loss="categorical_crossentropy",
                  optimizer='adadelta',
                  metrics=["accuracy"])

    model.fit(train_X,
              train_Y,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              class_weight=class_weighting,
              validation_data=(test_X[0:5], test_Y[0:5]),
              shuffle=True,
              callbacks=[mc_cb, tb_cb])

    model.save('seg.h5')
Example #15
def clust3ds():
    """Manual random pick from the Iris datast: virginica"""
    ds = mx.DataSet(
        mat=pd.DataFrame(
            [[5.8,2.7,5.1,1.9],
             [6.5,3.0,5.8,2.2],
             [7.2,3.6,6.1,2.5],
             [6.8,3.0,5.5,2.1],
             [6.2,2.8,4.8,1.8],
             [6.4,3.1,5.5,1.8],
             [6.2,3.4,5.4,2.3]],
            index=['O1', 'O2', 'O3', 'O4', 'O5', 'O6', 'O7'],
            columns=['V1', 'V2', 'V3', 'V4']),
        display_name='Some values', kind='Descriptive analysis / sensory profiling',
        style=mx.VisualStyle(fg_color='olive')
    )
    return ds
Example #16
def clust2ds():
    """Manual random pick from the Iris datast: versicolor"""
    ds = mx.DataSet(
        mat=pd.DataFrame(
            [[6.9,3.1,4.9,1.5],
             [4.9,2.4,3.3,1.0],
             [5.7,3.0,4.2,1.2],
             [5.1,2.5,3.0,1.1],
             [5.7,2.6,3.5,1.0],
             [5.1,2.5,3.0,1.1],
             [6.1,2.9,4.7,1.4]],
            index=['O1', 'O2', 'O3', 'O4', 'O5', 'O6', 'O7'],
            columns=['V1', 'V2', 'V3', 'V4']),
        display_name='Some values', kind='Descriptive analysis / sensory profiling',
        style=mx.VisualStyle(fg_color='saddlebrown')
    )
    return ds
Example #17
def getResult3():
    ds = dataset.DataSet()
    ds.getTrainData()
    dt = datadeal.DataTrain()
    clf = MLPClassifier(alpha=1e-5, hidden_layer_sizes=(20,), random_state=1)
    y = ds.train["labels"]
    X = ds.train["goals"]
    X = np.mat(X)
    # dt.trainTest(clf,X,y)
    dt.train(clf, X, y)

    def f(idx, mouse, goal, label):
        if idx is False:
            return False
        return np.array(goal).reshape([1, 2])

    dt.testResultAll(ds, f, savepath='./data/ann_goal.txt')
Example #18
def train_model():
    '''
    Train the model and save the learned parameters.
    :return: the trained model parameters
    '''
    with tf.name_scope('input_dataset'):
        x = tf.placeholder(tf.float32,[None,1200],name="input_dataset_x")
        y = tf.placeholder(tf.float32,[None,CLASS_NUM],name="input_dataset_y")
    y_,keep_prob = fall_net(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_)
        loss = tf.reduce_mean(cross_entropy)
        tf.summary.scalar("loss", loss)

    with tf.name_scope('optimizer'):
        train = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_,1),tf.argmax(y,1))
        correct_prediction = tf.cast(correct_prediction,tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        tf.summary.scalar("accuracy", accuracy)

    data = dataset.DataSet('../data/dataset',CLASS_LIST)
    saver = tf.train.Saver()
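    # bundle the loss/accuracy scalar summaries declared above into a single op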
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer = tf.summary.FileWriter("../log/", sess.graph)

        for step in range(1, TRAIN_STEP+1):
            batch_x, batch_y = data.next_batch(BATCH_SIZE)
            if step % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
                print('step %d, training accuracy %f' % (step, train_accuracy))
                summ = sess.run(merged, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
                train_writer.add_summary(summ, global_step=step)

            train.run(feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})

        train_writer.close()
        save_path = saver.save(sess, MODEL_SEVE_PATH)
        print("训练完毕,权重保存至:%s"%(save_path))
Example #19
def main():
    ds = dataset.DataSet()
    ds.getTrainData()
    mouses = ds.train["mouses"]
    goals = ds.train["goals"]
    labels = ds.train["labels"]
    n = ds.train["size"]
    vector = []
    for i in range(n):
        vector.append(getfeature(1, mouses[i], goals[i], 1)[0])
    vector = np.array(vector)

    scaler_vector = vector
    vector = preprocessing.scale(vector)
    vector = np.c_[vector[:, 1:], scaler_vector[:, 1]]

    # print vector[0]
    pca = PCA(n_components=16)
    pca.fit(vector)
    vector = pca.transform(vector)

    dt = datadeal.DataTrain()
    # about 20,000 ("2 w")
    clf = MLPClassifier(alpha=1e-6,
                        activation='logistic',
                        hidden_layer_sizes=(16, 18),
                        random_state=0,
                        solver='lbfgs',
                        max_iter=800)
    # clf = MLPClassifier(alpha=1e-4,
    #     activation='logistic', \
    #     hidden_layer_sizes=(16,18),random_state=0,solver='lbfgs',\
    #     max_iter=400)

    # False
    test = False
    if test:
        dt.trainTest(clf, vector, labels, 10.0)
    else:
        scaler = preprocessing.StandardScaler().fit(scaler_vector)
        dt.train(clf, vector, labels)
        dt.testResultAll(ds,
                         getfeature,
                         savepath='./data/0704tmp.txt',
                         stop=-1,
                         scal=scaler,
                         pca=pca)
Example #20
def __experiment_01(data_set, skipSimRank=False, set_no=1, a=0.5, aucn=2000, simrank_iter=10, category='math.GN'):
    print('Category: ', category)
    data = dataset.DataSet('../datasets/', category, data_set)
    matrix = sparse.csc_matrix(
        data.get_training_set(mode='adjacency_matrix_csc', ds_index=set_no), dtype='d')
    training = data.get_training_set()  # metrics.get_edges_set(data.get_training_set())
    test = data.get_test_edges()  # metrics.get_edges_set(data.get_test_edges())

    print('Set', set_no, ' N=', data.vx_count)
    # print('Computing: MERW transition matrix...', end=' ')
    # print(vekt)
    # print(Pmerw.get_shape()[0])
    # print('"distance" matrix...')
    # print('Computing: GRW transition matrix... ', end=' ')
    Pgrw, sd = merw.compute_grw(matrix)
    # print('"distance" matrix...')
    p_dist_grw = merw.compute_P_distance(Pgrw, alpha=a)
    print('   PD performance (AUC {}):'.format(aucn),
          metrics.auc(data.vx_count, training, test, p_dist_grw, aucn))
    Pmerw, vekt, eigval, stat = merw.compute_merw_matrix(matrix)
    p_dist_merw = merw.compute_P_distance(Pmerw, alpha=a)
    print(' MEPD performance (AUC {}):'.format(aucn),
          metrics.auc(data.vx_count, training, test, p_dist_merw, aucn))
    ep_dist_grw = merw.compute_P_distance(Pgrw, alpha=a)
    print('  PDM performance (AUC {}):'.format(aucn),
          metrics.auc(data.vx_count, training, test, ep_dist_grw, aucn))
    ep_dist_merw = merw.compute_P_distance(Pmerw, alpha=a)
    print('  PDM performance (AUC {}):'.format(aucn),
          metrics.auc(data.vx_count, training, test, ep_dist_merw, aucn))

    if skipSimRank:
        return
    graph = merw.matrix_to_graph(matrix)
    #print(graph)
    print('SimRank...', end='')
    sr, eps = merw.compute_basic_simrank(graph, a, maxiter=simrank_iter)
    print(' Accuracy:', eps)
    print('   SR performance (AUC {}):'.format(aucn),
          metrics.auc(data.vx_count, training, test, sr, aucn))

    print('MERW SimRank...', end='')
    sr, eps = merw.compute_merw_simrank_ofmatrix(matrix, a, maxiter=simrank_iter)
    print(' Accuracy:', eps)
    print(' MESR performance (AUC {}):'.format(aucn),
          metrics.auc(data.vx_count, training, test, sr, aucn))
Example #21
def clust1ds():
    """Manual random pick from the Iris datast: setosa"""
    ds = mx.DataSet(
        mat=pd.DataFrame(
            [[5.1,3.5,1.4,0.2],
             [4.6,3.4,1.4,0.3],
             [5.4,3.7,1.5,0.2],
             [5.7,3.8,1.7,0.3],
             [5.4,3.4,1.7,0.2],
             [4.8,3.1,1.6,0.2],
             [4.6,3.6,1.0,0.2]],
            index=['O1', 'O2', 'O3', 'O4', 'O5', 'O6', 'O7'],
            columns=['V1', 'V2', 'V3', 'V4']),
        display_name='Some values', kind='Descriptive analysis / sensory profiling',
        # style=VisualStyle(fg_color=(0.8, 0.2, 0.1, 1.0)),
        style=mx.VisualStyle(fg_color='indigo')
    )
    return ds
Example #22
    def thread_play_random(self, num=1000):
        game_num = 0
        data = dataset.DataSet()
        name = mp.current_process().name
        while config.running:
            train_start = time.time()
            pos = strategies.simulate_game(board.Board(komi=7.5))
            print("name:", name, "{}: random game #{} is ready, starting replay...".format(time.time() - train_start, game_num))
            train_start = time.time()
            self.replay(pos, data)
            print("name:", name, "{}: replay of game #{} finished.".format(time.time() - train_start, game_num))
            game_num += 1
            if game_num > num:
                break
        if data.data_size > 256:
            print("name:", name, "saving training data...", end="")
            data.save()
            print("name:", name, "done!")
Example #23
    def __init__(
            self,
            state_shape,
            action_num,
            image_num_per_state,
            model,
            gamma=0.99,  # discount factor
            replay_batch_size=32,
            replay_memory_size=5 * 10**4,
            target_model_update_freq=1,
            max_step=50,
            lr=0.00025,
            clipping=False  # if True, ignore reward intensity
    ):
        print("initializing DQN...")
        self.action_num = action_num
        self.image_num_per_state = image_num_per_state
        self.gamma = gamma
        self.replay_batch_size = replay_batch_size
        self.replay_memory_size = replay_memory_size
        self.target_model_update_freq = target_model_update_freq
        self.max_step = max_step
        self.clipping = clipping

        print("Initializing Model...")
        self.model = model
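        # target network: a deep copy of the online model, presumably refreshed
        # every target_model_update_freq steps by the training loop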
        self.model_target = copy.deepcopy(self.model)

        print("Initializing Optimizer")
        self.optimizer = optimizers.RMSpropGraves(lr=lr,
                                                  alpha=0.95,
                                                  momentum=0.0,
                                                  eps=0.01)
        self.optimizer.setup(self.model)
        self.optimizer.add_hook(chainer.optimizer.GradientClipping(20))

        print("Initializing Replay Buffer...")
        self.dataset = dataset.DataSet(max_size=replay_memory_size,
                                       max_step=max_step,
                                       frame_shape=state_shape,
                                       frame_dtype=np.uint8)

        self.xp = model.xp
        self.state_shape = state_shape
Example #24
def run_train():
    fout = open('inf.txt', 'w+')

    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:
        with tf.device('/gpu:0'):
            initializer = tf.random_uniform_initializer(-test_config.init_scale,
                                                        test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test, FLAGS.data_root_dir,
                                        TEST_SIZE, is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')

            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver, FLAGS.saveModelPath, train_model)
            print('start:', last_epoch + 1)

            test_accuracy_1, test_accuracy_5, test_loss = run_epoch(
                sess, test_config.keep_prob, fout, test_config.batch_size,
                train_model, data_test, tf.no_op(), 2, test_writer,
                istraining=False)
            info = "Final: Test accuracy (top 1): %.3f Test accuracy (top 5): %.3f Loss %.3f" % (
                test_accuracy_1, test_accuracy_5, test_loss)
            print(info)
            fout.write(info + '\n')
            fout.flush()

            test_writer.close()

            print("Training step is completed!")
            fout.close()
Example #25
def axisy():
    ds=dataset.DataSet()
    ds.getTrainData()
    dw=datadraw.DataDraw('2d')
    mouses=ds.train["mouses"]
    goals=ds.train["goals"]
    labels=ds.train["labels"]
    n=ds.train["size"]

    def getvector(mouse, goal):
        yn = len(mouse[1])
        y = mouse[1]
        if yn == 1:
            return 0
        # difference the y coordinates in place, back to front
        for i in range(yn - 1, 0, -1):
            y[i] = y[i] - y[i - 1]
        # count sign changes in the y increments
        state = y[0]
        ychange = 0
        for i in range(1, yn):
            if state * y[i] < 0:
                ychange += 1
            state = y[i]
        return ychange

    vector = []
    for i in range(n):
        vector.append(getvector(mouses[i], goals[i]))
    vector = np.array(vector, dtype=float)
    # dw.drawline([range(2600),vector[:2600]],c='b')
    # dw.drawline([range(2600,3000),vector[2600:]],c='r')
    # print vector[:2600].mean()
    # print vector[2600:].mean()

    # dw.drawbatchgoal(np.array([vector[:2600],labels[:2600]]).T,c='b')
    # dw.drawbatchgoal(np.array([vector[2600:],labels[2600:]]).T,c='r')
    # plt.show()

    # vector=np.array(vector)
    dt = datadeal.DataTrain()
    # clf = MLPClassifier(alpha=1e-3, hidden_layer_sizes=(2,2), random_state=1)
    clf = SVC()
    dt.trainTest(clf, vector.reshape([-1, 1]), labels)
Example #26
def xspeed():
    ds=dataset.DataSet()
    ds.getTrainData()
    dw=datadraw.DataDraw('2d')
    mouses=ds.train["mouses"]
    goals=ds.train["goals"]
    labels=ds.train["labels"]
    n=ds.train["size"]

    def getvector(mouse, goal):
        tn = len(mouse[2])
        t = mouse[2]
        x = mouse[0]
        if tn == 1:
            return 0
        # turn positions and timestamps into per-step x velocities, back to front
        for i in range(tn - 1, 0, -1):
            x[i] = x[i] - x[i - 1]
            t[i] = t[i] - t[i - 1]
            if t[i] > 0:
                x[i] = x[i] / t[i]
            else:
                x[i] = 0.0
        x = np.array(x)[1:]
        return x.mean()

    vector = []
    for i in range(n):
        vector.append(getvector(mouses[i], goals[i]))
    vector = np.array(vector, dtype=float)
    # dw.drawline([range(2600),vector[:2600]],c='b')
    # dw.drawline([range(2600,3000),vector[2600:]],c='r')
    # print vector[:2600].mean()
    # print vector[2600:].mean()

    # dw.drawbatchgoal(np.array([vector[:2600],labels[:2600]]).T,c='b')
    # dw.drawbatchgoal(np.array([vector[2600:],labels[2600:]]).T,c='r')
    # plt.show()

    # vector=np.array(vector)
    dt = datadeal.DataTrain()
    # clf = MLPClassifier(alpha=1e-3, hidden_layer_sizes=(2,2), random_state=1)
    clf = SVC()
    dt.trainTest(clf, vector.reshape([-1, 1]), labels)
Example #27
def assemble():
    ds = dataset.DataSet()
    ds.getTrainData()
    dw = datadraw.DataDraw('2d')
    mouses = ds.train["mouses"]
    goals = ds.train["goals"]
    labels = ds.train["labels"]
    n = ds.train["size"]
    vector = []
    # print mouses[492],goals[492]
    for i in range(n):
        vector.append(getvector(1, mouses[i], goals[i], 1))
    vector = np.array(vector)
    # print vector.shape

    dt = datadeal.DataTrain()
    # clf = MLPClassifier(alpha=1e-3, hidden_layer_sizes=(40), random_state=1)
    clf = SVC()
    dt.trainTest(clf, vector, labels)
Example #28
def train_model():
    tf.logging.set_verbosity(tf.logging.INFO)

    trainds = dataset.DataSet(len(dataset.poetrys_vector))
    sess = tf.InteractiveSession()

    input_data, targets = input()
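    # input() above is presumably a module-level helper returning the data and
    # target placeholders, not Python's builtin input()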

    end_points = rnn_model(128, 2, input_data, targets)

    tf.global_variables_initializer().run()
    merged_summaries = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(os.getcwd() + '/log', sess.graph)

    saver = tf.train.Saver(tf.global_variables())

    for epoch in range(10):

        for steps in range(dataset.n_chunk):
            x, y = trainds.next_batch(dataset.batch_size)
            # unpack in fetch order: train_op's result is discarded, loss comes
            # from total_loss
            summary, _, loss = sess.run([
                merged_summaries, end_points['train_op'],
                end_points['total_loss']
            ],
                                        feed_dict={
                                            input_data: x,
                                            targets: y
                                        })
            print(loss)
            train_writer.add_summary(summary, 500 * epoch + steps)

            tf.logging.info('steps:%d,loss:%.1f' % (500 * epoch + steps, loss))

            #if batche % 50 == 1:
            #print(epoch, 500*epoch+batche, 0.002 * (0.97 ** epoch))

        saver.save(sess, os.getcwd() + '/model', global_step=epoch)
        print(epoch)
    train_writer.close()
    sess.close()


#train_model()
Example #29
def maintrain():
    ds=dataset.DataSet()
    ds.getTrainData()
    mouses=ds.train["mouses"]
    goals=ds.train["goals"]
    labels=ds.train["labels"]
    n=ds.train["size"]
    vector = []

    vtr = []
    vtrg = []
    vtrl = []
    for i in range(n):
        tmp = get_X_PN(mouses[i])
        if tmp is False:
            vtr.append(getfft(mouses[i]))
            # if i < 2600:
            #     vtr.append(getfft(mouses[i]))
            # else:
            #     vtrg.append(getfft(mouses[i]))
            vtrl.append(labels[i])
    vtr = np.array(vtr)
    vtrl = np.array(vtrl)
    # print vtr.shape
    # print vtr[0]
    # exit()
    dt=datadeal.DataTrain()
    # clf2 = MLPClassifier(alpha=0.2,
    #     activation='logistic', \
    #     hidden_layer_sizes=(11,11),random_state=0,solver='lbfgs',\
    #     max_iter=250,early_stopping=True, epsilon=1e-04,\
    #     # learning_rate_init=0.1,learning_rate='invscaling',
    # )
    clf=SVC(C=0.2)
    np.set_printoptions(formatter={'float':lambda x: "%d"%float(x)})
    # confusion=dt.trainTest(clf,vector,labels,4.0,classn=6,returnconfusion=True)
    confusion=dt.trainTest(clf,vtr,vtrl,4.0,classn=2,returnconfusion=True)
Example #30
def assemble():
    ds = dataset.DataSet()
    ds.getTrainData()
    # dw=datadraw.DataDraw('2d')
    mouses = ds.train["mouses"]
    goals = ds.train["goals"]
    labels = ds.train["labels"]
    n = ds.train["size"]
    vector = []

    # print mouses[492],goals[492]
    for i in range(n):
        vector.append(getvector(1, mouses[i], goals[i], 1)[0])
        # break
    # exit()
    vector = np.array(vector)

    dt = datadeal.DataTrain()
    clf = MLPClassifier(alpha=1e-6,
                        activation='logistic',
                        hidden_layer_sizes=(20, 20),
                        random_state=0,
                        solver='lbfgs',
                        max_iter=1000)
    # clf = SVC(C=1.35,kernel='poly',degree=4,gamma=1,coef0=1.6)

    print(vector[0])
    print(vector[10])
    print(vector[1000])
    print(vector[2700])
    print(vector[2800])
    print(vector[2900])
    exit()
    # test=False
    # with open('./data/93.txt','r') as f:
    #     idxstr=f.read()
    # rightidx=idxstr.split('\n')
    # print rightidx

    test = True
    if test == False:
        dt.trainTest(clf, vector, labels)
    else:
        dt.train(clf, vector, labels)
        dt.testResultAll(ds, getvector, savepath='./data/0624tmp.txt')