Пример #1
0
def game1d():
	"""Play a 1-D board game on a 20-cell strip of '-' characters.

	Alternates player and computer moves until evaluate() reports a
	terminal state, then prints the final round number, the board, and
	the outcome (draw / player win / computer win).
	"""
	game_area = "-" * 20
	round_count = 0

	while True:
		round_count += 1
		print(game_area)
		game_area = player_move(game_area)

		# evaluate() returns '-' while the game is still undecided;
		# any other character ends the game.
		if evaluate(game_area) != '-':
			break

		game_area = computer_move(game_area)

		if evaluate(game_area) != '-':
			break

	print('Kolo', round_count, game_area)

	# Evaluate once and branch on the cached result instead of calling
	# evaluate() again for every comparison.
	result = evaluate(game_area)
	if result == '!':
		print('Remíza')  # draw
	elif result == 'o':
		print('Vyhrál jsi!')  # "You won!" (typo fix: was 'Vhrál jsi!')
	elif result == 'x':
		print('Vyhrál počítač')  # computer won
Пример #2
0
def lemonade(individuals, num_individuals, num_classes, num_epochs,
             generations, batch_size, learning_rate):
    """Run the LEMONADE-style evolutionary training loop.

    Each generation: train every individual that has not yet reached
    perfect accuracy, keep the best `num_individuals` via select(), and
    create offspring via produce().

    Note: produce()'s return value is ignored, so offspring are only kept
    if produce() mutates the list in place.

    Returns the individual list left after the final generation.
    """
    for _ in range(generations):
        # Individuals already at accuracy >= 1 were trained in a previous
        # generation and are skipped.
        for individual in individuals:
            if individual.accuracy < 1:
                evaluate(individual, num_epochs, batch_size, learning_rate)
        individuals = select(individuals, num_individuals)
        produce(individuals, num_classes)
    return individuals
Пример #3
0
def genetic(individuals, num_classes, num_epochs, num_individuals, generations,
            batch_size, learning_rate):
    """Genetic-algorithm loop; evolution steps are currently disabled, so
    this only (re)trains each individual for `generations` iterations.

    Returns the same `individuals` list after training.
    """
    for _ in range(generations):
        # An individual trained in a previous generation (accuracy >= 1)
        # is not trained again.
        for individual in individuals:
            if individual.accuracy < 1:
                evaluate(individual, num_classes, num_epochs, batch_size,
                         learning_rate)
        # Evolution steps intentionally disabled:
        # individuals = select(individuals, num_individuals)
        # crossover(individuals)
        # mutate(individuals)
    return individuals
Пример #4
0
# Undo the feature scaling so predictions and targets are back in the
# original units before scoring/plotting.
futurePredict = scaler.inverse_transform(futurePredict)
dataset = scaler.inverse_transform(dataset)
# calculate root mean squared error
rmse(trainY[0, :], trainPredict[:, 0], "Train")
# NOTE(review): this "Test" RMSE compares TEST predictions against TRAIN
# targets (trainY) — it almost certainly should be testY[0, :]; confirm
# against the part of the file where testY is defined.
rmse(trainY[0, :], testPredict[:, 0], "Test")
# shift data for plotting
# setPlot(values, total_len, start, end) presumably pads the series so it
# aligns on a shared x-axis — TODO confirm its signature upstream.
trainPredictPlot = setPlot(trainPredict, len(dataset), testLen,
                           testLen + len(trainPredict))
testPredictPlot = setPlot(testPredict, len(dataset), testLen,
                          testLen + len(testPredict))
futurePredictPlot = setPlot(futurePredict,
                            len(dataset) + len(futurePredict), len(dataset),
                            len(dataset) + len(futurePredict))
#evaluate
print("input data:")
evaluate(dataset)
print("predict data:")
evaluate(futurePredict)
# plot baseline and predictions
testFig = plt.figure("test")
plt.plot(dataset, color='#0B649F', linestyle='-', marker='.', label='input')
plt.plot(trainPredictPlot,
         color='green',
         linestyle='',
         marker='x',
         label='single step predict')
plt.plot(testPredictPlot,
         color='orange',
         linestyle='',
         marker='+',
         label='next step predict')
Пример #5
0
# Build the ground-truth assignment matrix: one one-hot row (from I_7) per
# sample, transposed to a 7 x N matrix.
realAssment = []
etmp = ny.eye(7)
for i in range(Y.shape[0]):
    realAssment.append(etmp[Y[i, 0] - 1])  # -1: labels presumably 1-based
realAssment = ny.mat(realAssment).T
# Randomly pick 7 samples as initial centroids, using the SAME sample index
# for the concatenated view (centroids) and every individual view
# (centroids2) so all methods start from comparable seeds.
centroids = ny.mat(ny.zeros((19, 7)))
dims = [dataSet2[i].shape[0] for i in range(len(dataSet2))]
centroids2 = [ny.mat(ny.zeros((dims[i], 7))) for i in range(len(dataSet2))]
for i in range(centroids.shape[1]):
    index = int(ny.random.rand() * 2310)  # assumes 2310 samples — TODO confirm
    centroids[:, i] = dataSet[:, index]
    for v in range(len(dataSet2)):
        centroids2[v][:, i] = dataSet2[v][:, index]
# Baseline: k-means on the concatenated features.
cenTmp, assment = kMeans2(dataSet, 7, centroids)
print '\n Con-Mc'
evaluate(assment, realAssment)
print
# MSPL on view 1 alone, view 2 alone, then all views jointly.
mspl = MSPL(7, 0.05, 1.4, dataSet2[0:1], centroids2[0:1])
print '\n view1'
mspl.mspl()
evaluate(mspl.Assment, realAssment)
print
mspl = MSPL(7, 0.05, 1.4, dataSet2[1:2], centroids2[1:2])
print '\n view2'
mspl.mspl()
evaluate(mspl.Assment, realAssment)
print
mspl = MSPL(7, 0.05, 1.4, dataSet2, centroids2)
print '\n mspl'
mspl.mspl()
evaluate(mspl.Assment, realAssment)
Пример #6
0
def main():
    """Train and evaluate a tweet hate-speech classifier.

    Pipeline: load train/test tweets -> build GloVe word-embedding features
    and a TF-IDF bag-of-words matrix -> concatenate both -> fit a
    RandomForest -> persist the model -> predict on the test set, save the
    predicted labels to a text file, and score them with evaluate().
    """
    #stratifiedSample()

    train_text = pd.read_table('Tweets/SampleTraining/TrainingSample.txt', engine="python-fwf")
    train_labels = pd.read_table('Tweets/SampleTraining/TrainingSampleLabel.txt', engine="python-fwf")
    train_pre = pd.read_table('Tweets/SampleTraining/TrainingSamplePreprocessing.txt', engine="python-fwf")

    test_text = pd.read_table('Tweets/DataTest/us/us_test.txt', engine="python-fwf")
    test_labels = pd.read_table('Tweets/DataTest/us/us_test_labels.txt', engine="python-fwf")
    test_pre = pd.read_table('Tweets/DataTest/us/us_test_preprocessing.txt', engine="python-fwf")

    #trial_text = pd.read_table('Tweets/DataTrial/us_trial_text.txt', engine="python-fwf")
    #trial_labels = pd.read_table('Tweets/DataTrial/us_trial_labels.txt', engine="python-fwf")

    # ================================================================== #

    # Extract the relevant columns from the loaded DataFrames.
    train_text = train_text['text']
    train_labels = train_labels['labels']
    trainpre = train_pre['text']  # NOTE(review): unused below

    #trial_text = trial_text['text']
    #trial_labels = trial_labels['labels']

    test_text = test_text['text']
    test_labels = test_labels['labels']  # NOTE(review): unused below
    testpre = test_pre['text']  # NOTE(review): unused below

    # ================================================================== #

    # Build the word-embedding matrices (GloVe; word2vec also supported).

    #embedding = "word2vec"
    embedding = "glove"

    print("Criação das matrizes a partir do GloVe...")
    emb_train = word_embeddings(train_text, embedding)
    #emb_trial = word_embeddings(trial_text, embedding)
    emb_test = word_embeddings(test_text, embedding)
    print("Criação das matrizes de Word Embeddings realizada!")

    # ================================================================== #

    # Build the BoW (TF-IDF) model; prior studies also concatenated BoW
    # with embeddings, which is what happens below.

    print("Criação do modelo BoW...")
    vec = TfidfVectorizer(min_df=1, ngram_range=(1,4), decode_error='ignore', max_features=3500)
    bow_train = vec.fit_transform(train_text).toarray()
    #bow_trial = vec.transform(trial_text).toarray()
    bow_test = vec.transform(test_text).toarray()
    print("Modelo BoW criado...")

    print("Concatenando Embeddings com BoW...")
    train = np.concatenate((emb_train, bow_train), axis=1)
    #trial = np.concatenate((emb_trial, bow_trial), axis=1)
    test = np.concatenate((emb_test, bow_test), axis=1)
    print("Concatenação realizada!")

    # ================================================================== #

    print("Treinando modelo...")
    #clf = LogisticRegression(C=10.0, random_state=0)
    #clf =  LinearSVC()
    clf = RandomForestClassifier()

    clf.fit(train, train_labels)

    print("Treinamento realizado!")
    filename = 'train.sav'
    # Bug fix: close the file deterministically instead of leaking the
    # handle from pickle.dump(clf, open(filename, 'wb')).
    with open(filename, 'wb') as model_file:
        pickle.dump(clf, model_file)

    print("Testando modelo...")
    prediction = clf.predict(test)

    # Save the predicted classes to a text file.

    print("Salvando as classes previstas em arquivo de txt")
    print()  # bug fix: bare `print` is a no-op expression in Python 3
    # Bug fix: `prediction.dtype = np.int` reinterprets the raw buffer
    # instead of converting values, and `np.int` was removed in NumPy 1.24.
    prediction = prediction.astype(int)
    #np.savetxt('english.output.bagofwords.txt', prediction, fmt='%d')
    #np.savetxt('english.output.glove.svm.txt', prediction, fmt='%d')
    np.savetxt('english.output.glove.rf.txt', prediction, fmt='%d')
    #np.savetxt('english.output.svm.txt', prediction, fmt='%d')
    #np.savetxt('english.output.rf.txt', prediction, fmt='%d')

    # Score the saved predictions against the reference labels.
    #evaluate("us_test_labels.txt", "english.output.bagofwords.txt")
    #evaluate("us_test_labels.txt", "english.output.glove.svm.txt")
    evaluate("us_test_labels.txt", "english.output.glove.rf.txt")
Пример #7
0
        sys.exit()

    # Read in the low energy data as well
    d_LE = ReadData(opts.fsig, m_sname_E2, opts.cuts + "&&!" + opts.sigcut)

    # Load classifier
    clf = joblib.load(opts.modelinput)

    # save data
    # NOTE(review): this line mixes `opts` and `options` — presumably two
    # distinct option objects; confirm `options.savename` is intended.
    savedata(d_eval, d_LE, options.savename, clf)

#**********************************************#
# Run over the evaluation data set
#**********************************************#
if options.evaluate:
    evaluate(d_eval, d_dev, opts)

#**********************************************#
# Plot effective area
#**********************************************#
if options.ploteffarea:

    # Add in the low energy data as well
    d_LE = ReadData(opts.fsig, m_sname_E2, opts.cuts + "&&!" + opts.sigcut)

    # Add it to the evaluation data
    ploteffarea(d_eval, d_dev, opts, d_LE)

#**********************************************#
# Check results of bdt by removing 1 variable
#**********************************************#
Пример #8
0
'''
# Build the ground-truth assignment matrix: one one-hot row per sample.
gnd = matFile['gnd']
realAssment = []
temp = ny.eye(10)
for i in range(gnd.shape[0]):
    realAssment.append(temp[gnd[i, 0]].tolist())  # build the ground-truth assignment matrix
print '真实分配矩阵创建完毕,开始聚类'
mspl = MSPL(10, 1.2, 3, dataSet)
# Collect per-run clustering scores over 20 restarts; warnings are promoted
# to errors so numerically unstable runs are skipped, not recorded.
pur = []
acc = []
nmi = []
warnings.filterwarnings('error')
for i in range(20):
    try:
        mspl.Mspl(1.2)
        # evaluate() returns (purity, accuracy, NMI) vs. the ground truth.
        p, a, n = evaluate(mspl.Assment, ny.mat(realAssment).T)
        pur.append(p)
        acc.append(a)
        nmi.append(n)
    # NOTE(review): bare `except` swallows every error (including
    # KeyboardInterrupt) — narrow it to the expected warning types.
    except:
        print '有警告!'
print pur
print acc
print nmi
# Summary statistics over the successful runs.
print 'pur mean(std) max min std', (
    '%.3f(%.3f)' % (ny.mean(pur), ny.std(pur))), max(pur), min(pur)
print 'acc mean(std) max min std', (
    '%.3f(%.3f)' % (ny.mean(acc), ny.std(acc))), max(acc), min(acc)
print 'nmi mean(std) max min std', (
    '%.3f(%.3f)' % (ny.mean(nmi), ny.std(nmi))), max(nmi), min(nmi)
input("回车结束程序!")
Пример #9
0
def eval(individual, num_classes, num_epochs, batch_size, learning_rate):
    # Thin pass-through that trains/scores a single individual.
    # NOTE(review): this shadows the builtin `eval` — rename (e.g.
    # eval_individual) if no external caller depends on the name. It also
    # discards evaluate()'s return value, so results must be communicated
    # through `individual` itself — confirm evaluate() mutates it.
    evaluate(individual, num_classes, num_epochs, batch_size, learning_rate)
Пример #10
0
        sys.exit()

    # Read in the low energy data as well
    d_LE = ReadData(opts.fsig, m_sname_E2, opts.cuts+"&&!"+opts.sigcut)

    # Load classifier
    clf = joblib.load(opts.modelinput)

    # save data
    # NOTE(review): mixes `opts` and `options` — presumably two distinct
    # option objects; confirm `options.savename` is intended here.
    savedata(d_eval, d_LE, options.savename, clf)

#**********************************************#
# Run over the evaluation data set
#**********************************************#
if options.evaluate:
    evaluate(d_eval,d_dev,opts)

#**********************************************#
# Plot effective area
#**********************************************#
if options.ploteffarea:

    # Add in the low energy data as well
    d_LE = ReadData(opts.fsig, m_sname_E2, opts.cuts+"&&!"+opts.sigcut)

    # Add it to the evaluation data
    ploteffarea(d_eval,d_dev,opts,d_LE)

#**********************************************#
# Check results of bdt by removing 1 variable
#**********************************************#
Пример #11
0
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

#Set the maximum number of BO iterations
additional_query_size = 51  # NOTE(review): unused in this visible chunk

#Set the hyperparameter space
lim_domain, dim, bounds = get_opt_domain()

#loading the housing dataset
Dataset, dataX, data_label, trainX, testX, label_train, label_test = load_house(
)

#Uniformly sample one point to start optimization
list_domain = init_random_uniform(lim_domain, n_points=1, initial=True)
# evaluate() presumably trains/scores a model at the sampled hyperparameter
# point and returns the observation fed to the BO model — TODO confirm.
obs = evaluate(trainX, testX, label_train, label_test, list_domain)
BO_data = np.array(list_domain), obs

#Set up the BO model
BO_model = SMSDKL(data_X=Dataset,
                  bounds=bounds,
                  BO_data=BO_data,
                  lim_domain=lim_domain)

# Two RMSE traces, both seeded from the first observation's error.
rmse_global = []
rmse_wise = []

rmse, _ = error_function(obs)
rmse_global.append(rmse)
rmse_wise.append(rmse)
Пример #12
0
        print('Loading Unet_base !')
    else:
        # Larger U-Net variant with a wider channel progression.
        trainer.setting.network = Model(in_ch=1, out_ch=1,
                                        list_ch=[-1, 32, 64, 128, 256, 512])
        print('Loading Unet_large !')

    # Load model weights
    print(args.model_path)
    trainer.init_trainer(ckpt_file=args.model_path,
                         list_GPU_ids=[args.GPU_id],
                         only_network=True)

    # Start inference
    print('\n\n# Start inference !')

    # The catalogue CSV lists which cases belong to each split; here only
    # the 'test' column is used.
    csv_path = '../../Catalogue' + '/' + str(args.catalogue) + '.csv'
    catalogue = csv_to_catalogue(csv_path)
    path = '../../../Data/Spine_Segmentation'
    cases = catalogue['test'].dropna()
    list_case_dirs = [os.path.join(path, cases[i]) for i in range(len(cases))]

    inference(trainer, list_case_dirs, save_path=os.path.join(trainer.setting.output_dir, 'Prediction'),
              do_TTA=args.TTA)

    # Evaluation: compare saved predictions against the ground truth dir.
    print('\n\n# Start evaluation !')
    mean_error = evaluate(prediction_dir=os.path.join(trainer.setting.output_dir, 'Prediction'),
                          gt_dir=path)

    print('\n\nmean error: {}'.format(mean_error))
Пример #13
0
def display_result(model, metric_file_location, test_loader, title=""):
    """Show the stored loss curve for a run, then evaluate the model.

    Args:
        model: trained model passed through to evaluate().
        metric_file_location: path of the saved metrics used for the plot.
        test_loader: data loader for the evaluation pass.
        title: heading applied to both the graph and the evaluation output.
    """
    display_loss_graph(metric_file_location, title=title)
    evaluate(model, test_loader, title=title)
Пример #14
0
    print(f'now working on dataset {index + 1}')
    train_iterator, test_iterator = DatasetPrepare.create_iterators(
        dataset["data_file"])
    # Ensure the output directory and (empty) model/metric files exist
    # before train() writes to them.
    Path(dataset["output_dir"]).mkdir(parents=True, exist_ok=True)
    model_output_file = join(dataset["output_dir"], "model.pt")
    metric_output_file = join(dataset["output_dir"], "metric.pt")
    if not exists(model_output_file):
        with open(model_output_file, "w"):
            pass
    if not exists(metric_output_file):
        with open(metric_output_file, "w"):
            pass

    # For every dataset after the first, measure performance carried over
    # from previous datasets before training on this one.
    if index > 0:
        evaluate(model=model,
                 test_loader=test_iterator,
                 title=f'Result Before training on dataset #{index + 1}')

    train(model=model,
          optimizer=optimizer,
          train_loader=train_iterator,
          test_loader=test_iterator,
          eval_every=len(train_iterator) // 2,  # every half-pass; confirm train()'s units
          model_output_file=model_output_file,
          metric_output_file=metric_output_file,
          num_epochs=10)

    display_result(model=model,
                   metric_file_location=metric_output_file,
                   test_loader=test_iterator,
                   title=f'Result Dataset #{index + 1} and before')
Пример #15
0
    # Alternative checkpoints (MobileNetV2 / Darknet / ResNet18 variants),
    # kept for quick switching between evaluation runs:
    # model.load_weights('./record/mn2_tla_256/mn2_256_tla_Oct24.h5')
    # print(model.summary())

    # model = get_darknet()
    # model.load_weights('./record/darknet_416_normal_1009_gcp/darknet_416_normal_1009_gcp.h5')
    # print(model.summary())

    # model = get_pretrained_darknet()
    # model.load_weights('./record/darknet_416_normal_1009_gcp/darknet_416_normal_1009_gcp.h5')
    # print(model.summary())

    # model = get_rn18()
    # model.load_weights('./record/rn18_normal_256/rn18_normal_Oct23_256.h5')
    # print(model.summary())

    # model = get_pretrained_rn18()
    # model.load_weights('./record/rn18_256_tla/rn18_256_tla_Oct24.h5')
    # print(model.summary())

    # evaluate() returns a {label_index: average_precision} mapping at the
    # given IoU threshold.
    average_precisions = evaluate(model, valid_batch, iou_threshold=0.5)

    # Write per-class AP and the overall mAP to stdout and a report file.
    with open('./evaluation_results/mn_normal_Nov01_android_IoU0_5.txt', 'w') as map_result:
        for label, average_precision in average_precisions.items():
            print(LABELS[label] + ': {:.4f}'.format(average_precision))
            map_result.write(LABELS[label] + ': {:.4f}'.format(average_precision) + '\n')
        # mAP = mean of the per-class APs.
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))
        map_result.write('\n\n\n')
        map_result.write('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))

Пример #16
0
 def test(self, dataset='all'):
     """Train the model on `dataset`, print its summary, and score it.

     NOTE(review): evaluate() reads self.y_true / self.y_pred, which are
     presumably populated as a side effect of self.train() — confirm.
     """
     model = self.train(dataset=dataset)
     print(model.summary())
     #model.fit(data, epochs=3, batch_size=64)
     # Final evaluation of the model
     evaluate(self.y_true, self.y_pred)
Пример #17
0
# Collect per-run clustering quality scores over 10 restarts.
pur = []
acc = []
nmi = []
for i in range(10):

    try:
        # Alternative clustering methods, kept for quick switching:
        #tw.kcent()
        tw.kmeans()
        #tw.tw_kmeans()
        #tw.mspl(0.5)
        #tw.w_mspl(0.5)
        #tw.tw_sql(0.45)
        #tw.wkmeans()
        #print tw.W
        #print tw.V
        # evaluate() returns (purity, accuracy, NMI) for the assignment
        # against the ground truth.
        p, a, n = evaluate(ny.mat(tw.assment), ny.mat(realAssment).T)
        print 'purity=', p, 'acc=', a, 'nmi=', n
        print
        pur.append(p)
        acc.append(a)
        nmi.append(n)
    # NOTE(review): catching Exception broadly hides real errors — narrow
    # this if the expected failure mode is known.
    except Exception, err:
        print err
        continue
print pur
print acc
print nmi
# Summary statistics over the successful runs.
print 'pur mean(std) max min std', (
    '%.3f(%.3f)' % (ny.mean(pur), ny.std(pur))), max(pur), min(pur)
print 'acc mean(std) max min std', (
    '%.3f(%.3f)' % (ny.mean(acc), ny.std(acc))), max(acc), min(acc)
Пример #18
0
    # Keep the first 50 rows of each of 10 contiguous 200-row groups
    # (presumably 10 classes x 200 samples — TODO confirm data layout).
    dataCut=data[0:50]
    for i in range(1,10):
        dataCut=ny.vstack((dataCut,data[i*200:i*200+50]))
    return dataCut

# Load the handwritten-digits .mat file and use the 'profile' feature view.
# NOTE(review): prefer a raw string r"D:\..." for Windows paths — '\d' and
# '\h' only survive here because they are not recognized escapes.
load_data=sio.loadmat("D:\dataSet\handwritten.mat")
dataMat=load_data['profile']
# Other feature views, kept for quick switching:
#dataMat=ny.hstack((dataMat,load_data['fourier']))
#dataMat=ny.hstack((dataMat,load_data['mor']))
#dataMat=ny.hstack((dataMat,load_data['pixel']))
#dataMat=ny.hstack((dataMat,load_data['profile']))
#dataMat=ny.hstack((dataMat,load_data['zer']))
print '数据加载完毕'
gnd=load_data['gnd']
realAssment=[]
temp=ny.eye(10)
#dataMat=cut(dataMat)
#gnd=cut(gnd)
for i in range(gnd.shape[0]):
    realAssment.append(temp[gnd[i,0]].tolist())  # build the ground-truth assignment matrix
# Pick 10 random samples as the initial centroids (columns).
centroids=ny.mat(ny.zeros((dataMat.shape[1],10)))
for i in range(10):
    #index=int(ny.random.rand()*200)+i*200
    index=int(ny.random.rand()*2000)  # assumes 2000 samples — TODO confirm
    centroids[:,i]=ny.mat(dataMat).T[:,index]
#print dataMat.shape
print '真实分配矩阵创建完毕,开始聚类'
centroids,Assment=kMeans2(ny.mat(dataMat).T, 10,centroids)
print '聚类结束'
evaluate(Assment, ny.mat(realAssment).T)
def training(X_train, Y_train, X_val, Y_val, lr_start, lr_end, epoch,
             batch_size):
    """Train the CIFAR-10 network, reporting train/validation accuracy.

    Args:
        X_train, Y_train: training images (N, 32, 32, 3) and int labels.
        X_val, Y_val: validation images and labels.
        lr_start: initial learning rate (stored in a tf.Variable).
        lr_end: unused here; kept for interface compatibility.
        epoch: number of epochs to run.
        batch_size: mini-batch size for training.
    """
    # NOTE(review): everything below is nested under the 'inputs' name
    # scope — kept as-is to avoid changing tensor/op names that logs or
    # graph lookups (e.g. the FC weights fetch below) may rely on.
    with tf.name_scope('inputs'):
        xs = tf.placeholder(shape=[None, 32, 32, 3],
                            dtype=tf.float32,
                            name='xs')
        ys = tf.placeholder(shape=[
            None,
        ], dtype=tf.int64, name='ys')
        is_training = tf.placeholder(dtype=tf.bool, name='is_training')

        learning_rate = tf.Variable(lr_start, name='learning_rate')
        lr_decay = 0.33
        lr_update = learning_rate.assign(learning_rate * lr_decay)

        train_data_generator = DataGenerator(X_train, Y_train)
        train_batch_generator = train_data_generator.dataGenerator(batch_size)
        iters = int(X_train.shape[0] / batch_size)
        print('number of batches for traning: {}'.format(iters))

        val_batch_size = 100
        val_data_generator = DataGenerator(X_val, Y_val)
        val_batch_generator = val_data_generator.dataGenerator(val_batch_size)
        print('data generator init')

        output, loss = Network(xs,
                               ys,
                               is_training=is_training,
                               out_size=10,
                               lr=learning_rate)

        opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)

        # evaluate() yields the per-batch error count used for accuracy.
        errorate = evaluate(output, ys)
        best_acc = 0
        cur_model_name = 'cifar-10_{}'.format(int(time.time()))

        with tf.Session() as sess:
            merge = tf.summary.merge_all()
            writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                           sess.graph)
            # saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer(), {is_training: False})

            for epc in range(epoch):
                print("epoch {} ".format(epc + 1))
                # Training
                train_eve_sum = 0
                loss_sum = 0
                for _ in range(iters):
                    train_batch_x, train_batch_y = next(train_batch_generator)
                    _, cur_loss, train_eve = sess.run([opt, loss, errorate],
                                                      feed_dict={
                                                          xs: train_batch_x,
                                                          ys: train_batch_y,
                                                          is_training: True
                                                      })
                    train_eve_sum += np.sum(train_eve)
                    loss_sum += np.sum(cur_loss)
                with tf.name_scope('train_accuracy'):
                    train_acc = 100 - train_eve_sum * 100 / Y_train.shape[0]

                loss_sum /= iters
                # Debug: dump the first row of the final FC layer weights.
                b = sess.graph.get_tensor_by_name(
                    'FC_Weights_9/FC_Layer_Weights_9:0')
                v = sess.run(b)
                print(v[0])
                print(
                    'average train loss: {} ,  average accuracy : {}%'.format(
                        loss_sum, train_acc))
                # Validation
                valid_eve_sum = 0
                for _ in range(Y_val.shape[0] // val_batch_size):
                    val_batch_x, val_batch_y = next(val_batch_generator)
                    valid_eve, merge_result = sess.run([errorate, merge],
                                                       feed_dict={
                                                           xs: val_batch_x,
                                                           ys: val_batch_y,
                                                           is_training: False
                                                       })
                    valid_eve_sum += np.sum(valid_eve)
                with tf.name_scope('validation_accuracy'):
                    valid_acc = 100 - valid_eve_sum * 100 / Y_val.shape[0]

                # Step-decay the learning rate at fixed epochs.
                if epc == 4 or epc == 7 or epc == 10 or epc == 13 or epc == 16:
                    sess.run(lr_update)

                print('validation accuracy : {}%'.format(valid_acc))

                # When achieve the best validation accuracy, we store the model paramters
                if valid_acc > best_acc:
                    print('* Best accuracy: {}%'.format(valid_acc))
                    best_acc = valid_acc
                    # saver.save(sess, 'model/{}'.format(cur_model_name))

            # Bug fix: this summary used to print inside the epoch loop
            # (once per epoch, followed by a no-op `for/else: pass`); it
            # now prints once after training completes.
            print(
                "Traning ends. The best valid accuracy is {}%. Model named {}."
                .format(best_acc, cur_model_name))
Пример #20
0
                                           (time.time() - start_time) /
                                           (batch_i + 1))
            log_str += f"\n---- ETA {time_left}"

            print(log_str)

            # Cumulative count of images the model has processed —
            # presumably used for burn-in/scheduling; confirm upstream.
            model.seen += imgs.size(0)

        if epoch % opt.evaluation_interval == 0:
            print("\n---- Evaluating Model ----")
            # Validation Set
            precision, recall, AP, f1, ap_class = evaluate(
                model,
                path=valid_path,
                iou_thres=0.5,
                conf_thres=0.5,
                nms_thres=0.5,
                img_size=opt.img_size,
                batch_size=8,
            )
            # Class-averaged metrics go to the scalar logger per epoch.
            evaluation_metrics = [
                ("val_precision", precision.mean()),
                ("val_recall", recall.mean()),
                ("val_mAP", AP.mean()),
                ("val_f1", f1.mean()),
            ]
            logger.list_of_scalars_summary(evaluation_metrics, epoch)

            # Print class APs and mAP
            ap_table = [["Index", "Class name", "AP"]]
            for i, c in enumerate(ap_class):
Пример #21
0
# Build the ground-truth assignment matrix: one one-hot row per sample.
gnd=matFile['gnd']
realAssment=[]
temp=ny.eye(10)
for i in range(gnd.shape[0]):
    realAssment.append(temp[gnd[i,0]].tolist())  # build the ground-truth assignment matrix
print '真实分配矩阵创建完毕,开始聚类'
'''
for i in range(len(dataSet)):
    print '\n'
    cenTmp,assment=kMeans2(dataSet[i], 10,centroids[i])
    print 'kmeans view',i
    evaluate(assment, ny.mat(realAssment).T)
    print '\n'
    mspl=MSPL(10,0.05,1.25,dataSet[i:i+1],centroids[i:i+1])
    mspl.mspl()
    print 'mspl view',i
    evaluate(mspl.Assment, ny.mat(realAssment).T)
print
'''
# MSPL over all views jointly.
mspl=MSPL(10,0.05,1.7,dataSet,centroids)
mspl.mspl()
print '\n mspl'
evaluate(mspl.Assment, ny.mat(realAssment).T)
# Baseline: k-means on the concatenated view.
cenTmp,assment=kMeans2(dataSet2, 10,centroids2)
print '\n Con-Mc'
evaluate(assment, ny.mat(realAssment).T)
print
# Second MSPL variant (mspl2) with different parameters.
mspl=MSPL(10,0.05,1.2,dataSet,centroids)
mspl.mspl2()
print '\n mspl2'
evaluate(mspl.Assment, ny.mat(realAssment).T)
Пример #22
0
import ktrain
from StandardiseDatasets import StandardiseDatasets
from Evaluate import evaluate

# Load the standardised TweetEval test split and score the saved predictor.
sd = StandardiseDatasets()
predictor = ktrain.load_predictor('models/tweeteval')
x, y_true = sd.get_TweetEval_test()

y_pred = predictor.predict(x)
# Map string labels to booleans: anything other than 'not hate' counts as
# hate (direct comparison replaces the False-if/else-True conditional).
y_pred = [label != 'not hate' for label in y_pred]

evaluate(y_true, y_pred)