Example #1
def main(agent):
    #all_results = np.load('data/{}.npy'.format(text))
    all_results = np.zeros((1,17))
    #np.save('data/{}.npy'.format(text), all_results)
    
    model_path     = 'model/{}.pt'.format(agent)
    new_model_path = 'model/{}_new.pt'.format(agent)
    
    while True:
        
        for i in range(n_train):
            print('Training:', i)
            (results, winner) = game(agent, 0, i, new_model_path)
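            # outcome label: +1 where the first column of results matches the winner flag, -1 otherwise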
            player = results[:, 0].reshape(-1, 1)
            outcome = 2 * np.abs(player + winner - 1) - 1
            new_result = np.append(results, outcome, 1)
            #all_results = new_result #if all_results == None  else np.append(all_results, new_result, 0)
            all_results = np.append(all_results, new_result, 0)
            
        #np.save('data/{}.npy'.format(agent), all_results)
        
        training(all_results, model_path, new_model_path, agent)
        
        all_results = np.zeros((1,17)) 
        
        # Validation
        print("Validation...")
        victory = 0
        for i in range(n_valid):
            (results, winner) = game(agent, 1, i, new_model_path)
            victory += winner # number of victories for player 1
            print('Validation', i, ':', 100 * victory / (i+1), '%')
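            # abort validation early once the win rate falls clearly below the acceptance threshold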
            if i > n_valid / 2 and victory / (i+1) < verif_prob - 0.1:
                break
        if victory / n_valid > verif_prob:
            # model validated, replaced with new one
            save_new_model(model_path, new_model_path)
Example #2
def main(args):
    start_time = time.time()

    if args.preprocessing:
        preprocessing(args)

    if args.training:
        training(args)
    
    if args.testing:
        testing(args)

    end_time = round((time.time() - start_time) / 60, 4)
    print(f'Done! {end_time} min spent')
Example #3
def run_training():
    initial_time = get_time()
    logger.info('Training start.')
    data = serializer.load('data')
    model = training(data)
    serializer.dump(model, 'model')
    logger.info('Training end: ' + str(calculate_time(initial_time)) + ' seconds.')
Example #4
 def training(self):
     self.pushButtonStartPredict.setEnabled(False)
     self.pushButtonStartTraining.setEnabled(False)
     rapport = training.training()
     self.plainTextResultTraining.setPlainText(rapport)
     self.pushButtonStartPredict.setEnabled(True)
     self.pushButtonStartTraining.setEnabled(True)
Example #5
def train(params, run_id):
    # make the folders
    save_model_path = os.path.join(root_save_dir, str(run_id))
    save_result_path = os.path.join(save_model_path, "test_results")
    save_log_path = os.path.join(root_log_dir, "{}.txt".format(run_id))

    if (not os.path.exists(save_model_path)):
        os.makedirs(save_model_path)
        os.makedirs(save_result_path)

    # perform training
    train_results = training(params=params, logger=LoggerGenerator.get_logger(
        log_file_path=save_log_path), save_model_to=save_model_path,model_def=test_model,train_func=do_forward_pass)

    # plot loss vs. iterations
    lines = [str(l) for l in train_results["total_loss_records"]]
    plot_trend_graph(var_names=["total loss"], var_indexes=[-1], var_types=["float"], var_colors=["r"], lines=lines,
                     title="total loss",save_to=os.path.join(save_result_path,"train-total_loss.png"),show_fig=False)

    # perform testing
    results = run_simple_test(params=params, saved_model_path=save_model_path,model_def=test_model)

    # save test results
    results["records"]["precision-recall-curve.jpg"].save(os.path.join(save_result_path,"precision-recall.png"))
    with open(os.path.join(save_result_path,"metrics.txt"),"w") as f:
        f.write(str(results["results"]))

    print("finish testing for parameter set #{}".format(id))
Example #6
def test_im(im_num_start, im_num_end):
    # after training this function will predict the values on test images in a specific range
    global train_images
    global train_labels
    global test_images
    # get trained values
    p_list, probs = training.training(train_labels,train_images)
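    # p_list holds the class priors P(vj); probs holds the per-pixel likelihoods
    # P(pixel value | vj), indexed as probs[digit][row][col][pixel_value]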
    test_result = [] # test_result will store the prediction of test images
    for i in range(im_num_start, im_num_end + 1):
        im = utils.read_image(i, test_images)
        prob_v = [0] * 10 # prob_v stores the log-probability for each value vj
        for j in range(10):
            p_a_v = 0
            for k in range(28):
                for l in range(28):
                    # work with sums of log probabilities rather than products of probabilities to avoid underflow errors
                    if im[k][l] == 0:
                        p_a_v = p_a_v + math.log10(probs[j][k][l][0])
                    else:
                        p_a_v = p_a_v + math.log10(probs[j][k][l][1])
                    if p_a_v == 0:
                        print('problem')
            prob_v[j] = math.log10(p_list[j]) + p_a_v
        predict = prob_v.index(max(prob_v))
        test_result.append(predict)
    return test_result
Example #7
def main():
    rootPath = os.getcwd() + '\\' + 'training\\'   
    stopWordList = ['a', 'about', 'above', 'after', 'again', 'against', 'all', 'am', 'an', 'and', 'any', 'are', "aren't", 'as', 'at', 'be', 'because', 'been', 'before', 'being', 'below', 'between', 'both', 'but', 'by', "can't", 'cannot', 'could', "couldn't", 'did', "didn't", 'do', 'does', "doesn't", 'doing', "don't", 'down', 'during', 'each', 'few', 'for', 'from', 'further', 'had', "hadn't", 'has', "hasn't", 'have', "haven't", 'having', 'he', "he'd", "he'll", "he's", 'her', 'here', "here's", 'hers', 'herself', 'him', 'himself', 'his', 'how', "how's", 'i', "i'd", "i'll", "i'm", "i've", 'if', 'in', 'into', 'is', "isn't", 'it', "it's", 'its', 'itself', "let's", 'me', 'more', 'most', "mustn't", 'my', 'myself', 'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or', 'other', 'ought', 'our', 'ours' ]
    
    classNames = ['Business', 'Entertainment', 'Politics'] 
    trainingQueries = [ 'bing', 'amazon', 'twitter', 'yahoo', 'google', 'beyonce', 'bieber', 'television', 'movies', 'music', 'obama', 'america', 'congress', 'senate', 'lawmakers' ]  
    #docRetrieve(classNames, trainingQueries, rootPath)   
    
    trainer = training.training()
    trainingDict, vocabulary = trainer.buildTrainingDict(rootPath, classNames, trainingQueries, stopWordList)
    f = open(rootPath + 'trainingDict.json', 'w')
    f.write(json.dumps(trainingDict))
    f.close()
    f = open(rootPath + 'vocabulary.json', 'w')
    f.write(json.dumps(list(vocabulary)))
    f.close()
    
    rootPath = os.getcwd() + '\\' + 'testing\\'
    testingQueries = [ 'apple', 'facebook', 'westeros', 'gonzaga', 'banana' ]
    #docRetrieve(classNames, testingQueries, rootPath)
    tester = testing.testing()
    testingDict = tester.buildTestDict(rootPath, classNames, testingQueries, stopWordList)
    f = open(rootPath + 'testingDict.json', 'w')
    f.write(json.dumps(testingDict))
    f.close()

    score, class_tag = tester.naiveBayesTesting(classNames, trainingDict, testingDict)
    print(score)
    print(class_tag)
Example #8
def main(args):
    # updating all the global variables based on the input arguments
    if (args.freeze_epochs):
        config.FREEZE_EPOCHS = args.freeze_epochs
    if (args.unfreeze_epochs):
        config.UNFREEZE_EPOCHS = args.unfreeze_epochs

    # updating batch size
    if (args.batch_size):
        config.PARAMS["batch_size"] = args.batch_size

    # updating command line arguments to the ARGS variable
    config.ARGS = args

    # calling required functions based on the input arguments
    if args.mode == "inference":
        inference.inference()
    else:
        training.training(args)
Example #9
def _parse_train(args):
    print("starting training with:\n\t{} epochs\n\t{} batch size\n\t{} bars".format(args.epochs, args.batch, args.bars))
    repr = "standard MIDI-like"
    if args.pianoroll:
        repr = "pianoroll"
    print("\tusing " + repr + " representation")
    if args.transpose:
        print('\tusing traning data transposed to only one major and one minor key')
    else:
        print('\tusing training data transposed to every possible key')
    print("\tverbose is set to " + str(args.verbose))
    if args.resume:
        if args.initialize:
            raise ValueError("Resuming training from previous checkpoint and using pretrained weights is not possible!")
        print("\ttrying to resume training from previous checkpoint")
    if args.initialize:
        print("\ttrying to load pretrained weights from 2 bar model\n")

    training.training(args.epochs, args.batch, args.bars, args.pianoroll, args.transpose, args.verbose, args.save_location, args.resume, args.initialize)
Example #10
def model_train(args, max_iter, learning_rate):
    theta0, theta1, error = training(args, max_iter, learning_rate)
    if error != "":
        print(error)
        return
    save_theta(theta0, theta1)
    theta = pd.read_csv(".save_model.csv", delimiter=',')
    t0 = theta.Theta0[0]
    t1 = theta.Theta1[0]
    data = pd.read_csv(args.file, delimiter=',')
    if args.visualise:
        plot_graph_model(t0, t1, data)
Example #11
def evaluate_optimization(candidates, args):
    global BEST_FITNESS
    global BEST_GENE
    fitness = []

    for cs in candidates:
        fn = training(list(cs), EMBED)
        fitness.append(fn)

        if BEST_FITNESS > min(fitness):
            BEST_FITNESS = min(fitness)
            for i in cs:
                BEST_GENE = list(i[:13])

    return fitness
Example #12
def grid_search(run_training, discount, softmax, sarsa, filepath):

    episodes = 2000  # number of training episodes

    alpha_list = [0.1, 0.3, 0.5]
    epsilon_list = ['Lin', 'Exp']
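    # two exploration schedules: linear epsilon decay and exponential decay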
    epsilon_list_values = [
        np.linspace(0.8, 0.001, episodes),
        training.exp_decay(episodes)
    ]

    if run_training:

        print('Discount:', discount, 'Softmax:', softmax, 'Sarsa:', sarsa)

        # wall
        wall = [[2, 1], [2, 2], [2, 3], [3, 3], [3, 4], [3, 9], [3, 8], [9, 6],
                [7, 6], [6, 6], [6, 6], [6, 7], [6, 8], [8, 3], [7, 0]]
        portal = [[0, 6], [9, 8]]
        sand = [[2, 0], [8, 6], [4, 3], [5, 3], [7, 1], [3, 7], [9, 3]]

        reward_val = np.zeros((2, 3))

        for a, alpha in enumerate(alpha_list):
            alpha_values = np.ones(episodes) * alpha
            for e, epsilon_values in enumerate(epsilon_list_values):
                _, reward, _ = training.training(10, episodes, wall, portal,
                                                 sand, alpha_values,
                                                 epsilon_values, discount,
                                                 softmax, sarsa)
                reward_val[e, a] = reward
                print('Alpha:', alpha, 'Epsilon:', epsilon_list[e], 'Reward:',
                      reward)

        print('--------------------------------------------------')
        print('\n')

        np.save(path + filepath, reward_val)
    reward_val = np.load(path + filepath + '.npy')
    plot_map(reward_val, alpha_list, epsilon_list, filepath)
Example #13
training_params = {
    'max_examples': 10000,
    'epochs': 5,
    'lr': 0.01,
    'train_files': ['/home/Liz/all_gis_islandviewer_iv4aa_data.csv.gz'],
    'valid_files': ['/home/Liz/all_gis_islandviewer_iv4ag_data.csv.gz']
}

perform = []

for _dropout_keep in [0.5, 0.8]:
    training_params['dropout_keep_prob'] = _dropout_keep
    for _l2_coef in [1e-03, 1e-06]:
        training_params['l2'] = _l2_coef
        for _lr in [1e-1, 1e-3]:
            training_params['lr'] = _lr
            _best_cost = training(model_name, model_type, model_params,
                                  training_params)
            msg = 'dropout_keep: {}, l2 coef: {}, lr: {}, Best val loss: {}'.format(
                _dropout_keep, _l2_coef, _lr, _best_cost)
            print(msg)
            logging.info(msg)
            perform.append([_dropout_keep, _l2_coef, _lr, _best_cost])

logging.info('Finished with iterations.')
p = pd.DataFrame(perform,
                 columns=['dropout_keep', 'l2_coeff', 'lr', 'val_loss'
                          ]).pivot_table(values=['val_loss'],
                                         columns=['l2_coeff'],
                                         index=['dropout_keep', 'lr'])

print(p)
logging.info('Writing to csv')
p.to_csv('~model_hyperparams_final.csv')
Example #14
import sys
sys.path.append('../src')
from training import training
training('../dict/ntusd-positive.txt','../dict/ntusd-negative.txt','../model/','../dict/ntusd-full.dic')
Example #15
def train(is_finetune=False):

    tf.reset_default_graph()
    startstep = 0 if not is_finetune else int(
        FLAGS.finetune_dir.split('-')[-1])
    with tf.Graph().as_default():
        # ++++++++++++++++++++++++ TRAINING INPUT LOADING ++++++++++++++++++++++++
        x_train, y_train, id_train = batch_inputs.inputs(
            ['./record/train.tfrecords'], FLAGS.batch_size, False)
        print(x_train.shape)
        y_train = tf.one_hot(y_train, FLAGS.num_class)
        tf.summary.image('images', x_train)
        x_train = tf.image.resize_image_with_crop_or_pad(
            x_train, IMAGE_SIZE, IMAGE_SIZE)
        y_train = tf.image.resize_image_with_crop_or_pad(
            y_train, IMAGE_SIZE, IMAGE_SIZE)
        # ++++++++++++++++++++++++ TESTING INPUT LOADING ++++++++++++++++++++++++
        x_test, y_test, id_test = batch_inputs.inputs(
            ['./record/test.tfrecords'], FLAGS.batch_size, True)
        y_test = tf.one_hot(y_test, FLAGS.num_class)
        tf.summary.image('images', x_test)
        x_test = tf.image.resize_image_with_crop_or_pad(
            x_test, IMAGE_SIZE, IMAGE_SIZE)
        y_test = tf.image.resize_image_with_crop_or_pad(
            y_test, IMAGE_SIZE, IMAGE_SIZE)
        # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        is_training = tf.placeholder(tf.bool, name='is_training')
        keep_prob = tf.placeholder(tf.float32, name="keep_probability")
        images = tf.placeholder(
            tf.float32,
            shape=[None, FLAGS.image_h, FLAGS.image_w, FLAGS.image_c])
        labels = tf.placeholder(
            tf.int64, [None, FLAGS.image_h, FLAGS.image_w, FLAGS.num_class])
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

        print('++++++++ Model building starts here +++++++++')
        if FLAGS.model == "basic":
            logits = inference.inference_basic(images, is_training)
        elif FLAGS.model == "extended":
            logits = inference.inference_extended(images, is_training)
        elif FLAGS.model == "basic_dropout":
            logits = inference.inference_basic_dropout(images, is_training,
                                                       keep_prob)
        elif FLAGS.model == "extended_dropout":
            logits = inference.inference_extended_dropout(
                images, is_training, keep_prob)
        else:
            raise ValueError("The selected model does not exist")

        loss = evaluation.loss_calc(logits=logits, labels=labels)
        train_op, global_step = training.training(loss=loss)
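        # per-pixel class prediction: argmax over the channel axis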
        accuracy = tf.argmax(logits, axis=3)

        summary = tf.summary.merge_all()
        saver = tf.train.Saver(max_to_keep=1000)

        with tf.Session() as sess:

            if (is_finetune):
                print(
                    "\n =====================================================")
                print("  Finetuning with model: ", FLAGS.model)
                print("\n    Batch size is: ", FLAGS.batch_size)
                print("    ckpt files are saved to: ", FLAGS.log_dir)
                print("    Max iterations to train is: ", config.n_train_steps)
                print(" =====================================================")
                saver.restore(sess, FLAGS.finetune_dir)
            else:
                print(
                    "\n =====================================================")
                print("  Training from scratch with model: ", FLAGS.model)
                print("\n    Batch size is: ", FLAGS.batch_size)
                print("    ckpt files are saved to: ", FLAGS.log_dir)
                print("    Max iterations to train is: ", config.n_train_steps)
                print(" =====================================================")
                sess.run(tf.variables_initializer(tf.global_variables()))
                sess.run(tf.local_variables_initializer())

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            train_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

            for step in range(startstep + 1,
                              startstep + config.n_train_steps + 1):
                images_batch, labels_batch = sess.run(
                    fetches=[x_train, y_train])

                train_feed_dict = {
                    images: images_batch,
                    labels: labels_batch,
                    is_training: True,
                    keep_prob: 0.5
                }

                start_time = time.time()

                _, train_loss_value, \
                    train_accuracy_value, \
                    train_summary_str = sess.run([train_op, loss, accuracy, summary], feed_dict=train_feed_dict)

                # Finding duration for training batch
                duration = time.time() - start_time

                if step % 10 == 0:  # Print info about training
                    examples_per_sec = FLAGS.batch_size / duration
                    sec_per_batch = float(duration)

                    print('\n--- Normal training ---')
                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, train_loss_value,
                                        examples_per_sec, sec_per_batch))

                    # eval current training batch per-class accuracy
                    pred = sess.run(logits, feed_dict=train_feed_dict)
                    # evaluation.per_class_acc(pred, labels_batch)  # printing class accuracy

                    train_writer.add_summary(train_summary_str, step)
                    train_writer.flush()

                if step % 100 == 0 or (step + 1) == config.n_train_steps:
                    # test_iter = FLAGS.num_examples_epoch_test // FLAGS.test_batch_size
                    test_iter = FLAGS.num_examples_epoch_test // FLAGS.batch_size
                    """ Validate training by running validation dataset """
                    print(
                        "\n==========================================================="
                    )
                    print("--- Running test on VALIDATION dataset ---")
                    total_val_loss = 0.0
                    # hist = np.zeros((FLAGS.num_class, FLAGS.num_class))
                    for val_step in range(test_iter):
                        test_img_batch, test_lbl_batch = sess.run(
                            fetches=[x_test, y_test])

                        val_feed_dict = {
                            images: test_img_batch,
                            labels: test_lbl_batch,
                            is_training: False,  # run validation in inference mode
                            keep_prob: 1.0
                        }

                        _val_loss, _val_pred = sess.run(
                            fetches=[loss, logits], feed_dict=val_feed_dict)
                        total_val_loss += _val_loss
                        # hist += evaluation.get_hist(_val_pred, val_labels_batch)
                    print(
                        "Validation Loss: ", total_val_loss / test_iter,
                        ". If this value increases the model is likely overfitting."
                    )
                    # evaluation.print_hist_summery(hist)
                    print(
                        "==========================================================="
                    )

                # Save the model checkpoint periodically.
                if step % 200 == 0 or (step + 1) == config.n_train_steps:
                    print("\n--- SAVING SESSION ---")
                    checkpoint_path = os.path.join(FLAGS.log_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
                    print("=========================")

            coord.request_stop()
            coord.join(threads)
Example #16
        x = self.pool2(x)
        x = self.res31(x)
        x = self.res32(x)
        x = self.res33(x)

        x = self.pool3(x)
        x = self.res41(x)
        x = self.res42(x)
        x = self.res43(x)

        x = self.pool4(x)
        x = self.res51(x)
        x = self.res52(x)
        x = self.res53(x)
        x = self.res54(x)

        x = x.view(valid_size, 1, 512)
        x = self.fc1(x)
        x = self.softmax(x)

        return x


if __name__ == "__main__":
    classifier = ResNet()
    lossF = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(classifier.parameters(), lr=0.005)
    optimizer.zero_grad()

    training(classifier, lossF, optimizer, "resnet_best")
Example #17
import argparse
import torch

if __name__ == "__main__":
        
    # input parameters
    parser = argparse.ArgumentParser(description='Documentation in the following link: https://github.com/RualPerez/AutoML', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--batch', help='Batch size of the policy (int)', nargs='?', const=1, type=int, default=15)
    parser.add_argument('--max_layer', help='Maximum nb layers of the childNet (int)', nargs='?', const=1, type=int, default=6)
    parser.add_argument('--possible_hidden_units', default=[1,2,4,8,16,32], nargs='*',
                        type=int, help='Possible hidden units of the childnet (list of int)')
    parser.add_argument('--possible_act_functions', default=['Sigmoid', 'Tanh', 'ReLU', 'LeakyReLU'], nargs='*', 
                        type=str, help='Possible activation funcs of the childnet (list of str)')
    parser.add_argument('--verbose', help='Verbose while training the controller/policy (bool)', nargs='?', const=1, 
                        type=bool, default=False)
    parser.add_argument('--num_episodes', help='Nb of episodes the policy net is trained (int)', nargs='?', const=1, 
                        type=int, default=500)
    args = parser.parse_args()
    
    # parameter settings
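    # 'EOS' is an extra action that lets the controller terminate a child architecture early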
    args.possible_hidden_units += ['EOS']
    total_actions = args.possible_hidden_units + args.possible_act_functions
    n_outputs = len(args.possible_hidden_units) + len(args.possible_act_functions) #of the PolicyNet
    
    # setup policy network
    policy = PolicyNet(args.batch, n_outputs, args.max_layer)
    
    # train
    policy = training(policy, args.batch, total_actions, args.verbose, args.num_episodes)
    
    # save model
    torch.save(policy.state_dict(), 'policy.pt')
Example #18
                          people_num // n_fold_num * (n_fold_num - 1))
val_y = val_y.reshape(n_fold_num, people_num // n_fold_num)

# with a single fold the training split is empty, so swap it with the validation split
if (n_fold_num == 1):
    train_x, val_x = val_x, train_x
    train_y, val_y = val_y, train_y

#train the folds
acc = 0.0
for f in range(n_fold_num):
    b_sol, w_sol, mean, sigma, feature_enco = training.training(
        train_x, train_y, f, feature_num, n_fold_num, people_num, feature,
        norm)
    if (f == 0):
        w_his1 = w_sol
        b_his1 = [b_sol]

    else:
        w_his1 = np.append(w_his1, w_sol)
        b_his1 = np.append(b_his1, b_sol)
    if (n_fold_num != 1):
        acc_temp = training.validation(val_x, val_y, w_sol, b_sol, f,
                                       feature_num, feature, norm, mean, sigma,
                                       ratio)
        print(acc_temp)
        acc = acc + acc_temp
Example #19
import sys
sys.path.append('../src')
from training import training
training('../data/positive.txt','../data/negative.txt','../model/','../dict/user_dic.dic')
Example #20
 def get_fitness(self, vector):
     """
         maximize -(x^2 + (y+1)^2) + 4
         The maximum is 3.75 at (0.5, -1) (remember that x is fixed at 0.5 here).
     """
     return training(vector, EMBED)
Example #21
		cropped_face=cropped_faces[i]

		label,confidence=face_recognizer.predict(cropped_face)  #confidence is an integer returned by predict method which gives associated confidence (e.g. distance) for the predicted label.
		cv.rectangle(img,(x, y),(x+ width, y + height),(0, 255, 0),2)  #draw a rectangle around detected face
		cv.putText(img,str(names[label]),(x,y),cv.FONT_HERSHEY_PLAIN,1,(0,255,0),1) #put the correct label above the detected face


	

	return img




#train the data using all the images in the dataset
faces,labels=training("training-data")

#LBPH Face Recognizer
face_recognizer=cv.face.LBPHFaceRecognizer_create()

#convert labels (list) to numpy array as LBPH Face Recognizer needs a numpy array as its second argument
labels=np.array(labels)
face_recognizer.train(faces,labels)


testimagepath="test-data/1.jpeg"

names={1:'Evans',2:'Hemsworth'} # a dictionary to store name associated with each label
predictedimg=predict(testimagepath,face_recognizer,names)

#show the annotated test image
if predictedimg is not None:
    cv.imshow('prediction', predictedimg)
    cv.waitKey(0)



Example #22
if __name__ == '__main__':
    global history, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins

    history = deque(maxlen = 8)

    pix_per_cell = 8  # HOG pixels per cell
    cell_per_block = 2  # HOG cells per block
    spatial_size = (32, 32)  # Spatial binning dimensions
    hist_bins = 32  # Number of histogram bins
    orient = 15  # HOG orientations

    images = glob.glob('./training-data/*/*/*.png')
    print(images)
    svc, X_scaler = training(images, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)

    image = mpimg.imread('./test_images/test4.jpg')
    img = detect_cars(image)
    fig = plt.gcf()
    fig.set_size_inches(16.5, 8.5)
    plt.imshow(img)
    plt.show()

    output = 'project_video_result.mp4'
    clip = VideoFileClip("project_video.mp4")
    video_clip = clip.fl_image(detect_cars)
    video_clip.write_videofile(output, audio=False)
Example #23
def main():

    # Safety checkpoint
    if (len(sys.argv) < 2 or len(sys.argv) > 3):
        print('Number of arguments is incorrect')
        Usage()
    ''' Creating the Model '''
    network, criterion, optimizer = create_smaller_UNET()  # 3 layers model

    if (sys.argv[1] == '--predict'):
        print("Loading the model... ")
        checkpoint = torch.load(BEST_MODEL_PATH)
        network.load_state_dict(checkpoint['state_dict'])

    elif (sys.argv[1] == '--train'):
        print("Reading", TRAINING_SIZE, "training image(s)")
        trainset = DatasetPatched(TRAIN_IMAGE_PATH, TRAIN_GROUNDTRUTH_PATH,
                                  TRAINING_SIZE, OVERLAY_SIZE, ROTATION)
        trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)

        print("Reading", VAL_SIZE, "validation image(s)")
        valset = DatasetPatched(TRAIN_IMAGE_PATH, TRAIN_GROUNDTRUTH_PATH,
                                VAL_SIZE, OVERLAY_SIZE, ROTATION)
        valloader = DataLoader(valset, batch_size=BATCH_SIZE, shuffle=True)
        ''' Training phase '''
        # TODO: optional, add: ReduceLROnPlateau
        # TODO: optional, put early stopping
        val_loss_hist, val_loss_hist_std, train_loss_hist, train_loss_hist_std, val_acc_hist, val_acc_hist_std, train_acc_hist, train_acc_hist_std = training(
            network, criterion, optimizer, score, trainloader, valloader,
            PATCH_SIZE, NUM_EPOCHS)
        print("Saving the model... ")
        torch.save({'state_dict': network.state_dict()}, MODEL_PATH)
    else:
        print("Argument not recognized")
        Usage()

    print("Load testing data... ")
    testset = TestDataset(TEST_IMAGE_PATH, NR_TEST_IMAGES)
    loader_test = DataLoader(testset, batch_size=1,
                             shuffle=False)  # TODO: increase the batch_size

    print("Predict labels... ")
    roadsPredicted = predict_test_images(network, loader_test)

    # Transform pixel-wise prediction to patchwise
    patched_images = patch_prediction(roadsPredicted, TEST_IMG_SIZE,
                                      IMG_PATCH_SIZE)
    ''' Get patches for submission '''
    patches = getPatches(patched_images)
    ''' Generate submission '''
    reconstruct_img(NR_TEST_IMAGES, TEST_IMG_SIZE, IMG_PATCH_SIZE, patches,
                    PREDICTED_PATH)
    submission_to_csv(SUBMISSION_PATH, PREDICTED_PATH)

    print('See latest submission file ', SUBMISSION_PATH)
    print('See predicted images at    ', PREDICTED_PATH)
Example #24
 def startTraining(self):
     training.training(self.get_next_input, self.get_next_reference_sequence)
     return 0
Example #25
import numpy as np

import models
import params
import preprocessing
import training as tr

# Load dataset
dataset = np.load(params.PATH_DATA)

# Splitting the dataset
index_train, index_valid, index_test = preprocessing.split_dataset(dataset)

# Define the model
model = models.get_LSTM_v1(params.T - 1, params.D - 1, params.LR,
                           params.NHIDDEN, params.NNEURONSH,
                           params.DROPOUT_RATE)
#model = models.get_CausalCNN_v5(params.T-1, params.D-1, params.LR, params.DROPOUT_RATE)

# Training
tr.training(params.PATH_EXPERIMENT,
            model,
            dataset,
            index_train,
            index_valid,
            params.D,
            params.B,
            params.NB_SAMPLES_PER_EPOCH,
            params.NB_EPOCHS,
            params.PATIENCE,
            params.LR,
            weighted_samples=params.WEIGHTED_SAMPLES,
            pretrained=params.PRETRAINED,
            h5py=params.H5PY)
Example #26
from training import training

import numpy as np 

from loadData import loadData


with open('twidf_window4_directed_weighted', 'r') as f:
    X = np.loadtxt(f, delimiter=',')

path = '../data/r8_train_stemmed.txt'
trainData = True

data = loadData(path,trainData)

labels = data['labels']

(dictionnaryOfClasses , labelsInNumbers) = labelDictionnary(labels)

lsi = True
numberOfComponents = 100

(reducedMatrix , Y) = dimensialityReduction(X , labelsInNumbers , lsi , numberOfComponents)


svm = True

scores = training(reducedMatrix , Y , svm )

print(scores['micro'])
print(scores['macro'])
Example #27
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 16 19:42:07 2017

@author: axionl
"""

from train_data import get_train_data
from training import training
from testing import testing
import numpy as np
import matplotlib.pyplot as plt

root_path = "../Training_data/"
[train_data, distance] = get_train_data(root_path)
alpha = 0.01  # learning rate (descent speed)
num_iters = 300  # number of iterations
training_summary = np.zeros((1, 6))
T = 5  # number of matrix rows

theta_summary = training(train_data, distance, T, alpha, num_iters)
# testing_result = testing(theta_summary)
error = testing(train_data, distance, T, theta_summary)
plt.plot(error[:, 1:2], error[:, 0:1], 'ro')
plt.show()
Example #28
with open(OUTPUT_FILE, 'w') as o_file:
  o_file.write('#file_name, file_size(octets), number_of_lines_model, number_of_lines_test, mean_time_training, mean_time_prediction, accuracy\n')

dataFiles = os.listdir("./data")
dataFiles = ["./data/"+i for i in dataFiles]
dataFiles.sort()

#MLlib seems to take a few runs to reach maximum speed, so we warm it up first
print('"Warming up" MLLib to get maximum speed')
for i in range(0, 5):
  testTree.main(sc, dataFiles[0], NB_PARTITIONS)

print("----------------")
print("Real test begins")
print("----------------")
for it, dataFile in enumerate(dataFiles):
  stats = []  
  partialStat = training.training(sc, dataFile, NB_PARTITIONS)
  
  #Compute statistics on dataFile
  size = os.path.getsize(dataFile)
  stats += [dataFile, size]
  stats += partialStat
  with open(OUTPUT_FILE, 'a') as o_file:
    wr = csv.writer(o_file)
    wr.writerow(stats)
  print("----------------")
  
print("Test finished")
print("----------------")
print("Writing results on " + OUTPUT_FILE)
Example #29
"""
Plots the total error for W constant, and L taking values chosen in constant.py
"""

from testing import testing
from training import training
from constant import W, LL, N, N_ite, learning_rate, NB_EPOCH, id_function
import numpy as np
import matplotlib.pyplot as plt

errormax = []
errorplot = []
errormin = []
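# average the test error over N_ite independent runs and track a one-sigma band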
for lite in LL:
    errorl = []
    for n in range(N_ite):
        training(W, lite, N, NB_EPOCH, learning_rate, id_function)
        errorl.append(testing(W, lite, N, NB_EPOCH, learning_rate,
                              id_function))
    means = np.mean(errorl)
    sigma = np.sqrt(np.var(errorl, ddof=1))
    errormax.append(means + sigma)
    errorplot.append(means)
    errormin.append(means - sigma)

plt.clf()
plt.figure()

fig, ax1 = plt.subplots()

ax1.set_xlabel("L", fontsize=14)
ax1.set_ylabel("Error", fontsize=14)
Example #30
from training import training
from data import reprod_init
from vizualization import vizualization
from MNISTNet import MNISTNet

reprod_init()

mnist_net = MNISTNet(100)

test_loss, test_accuracy = training(mnist_net)

vizualization(test_loss, test_accuracy)

Example #31
def main():    
    
    global scanning
    global ser
    
    # Create the model
    model = training.createModel()

    #UNCOMMENT THE LINE BELOW TO ADD THE TRAINING PHASE
    training.training(paintsPath, size, model)
    model.load("model.xml")
    
    #Read the image to recognize
    [W, w] = imageUtils.read_images(picPath, size, 1)
    if W is None:
        print("Faccia non riconosciuta")
        scanning = True
        cv2.imshow('display',again)
        cv2.waitKey(1)
        time.sleep(4)
        cv2.imshow('display',home)
        cv2.waitKey(1)
        ser = serial.Serial(serialPort, 9600)
        return None
    
    #Perform the classification
    [p_label, p_confidence] = model.predict(np.asarray(W[0]))

    
    #show the image

    #create a black image
    bg = np.zeros(scsize, np.uint8)
    vis = imageUtils.showImage(W, paintsPath, p_label, size)
    #center the detected-face vs. painting comparison on the black image
    bg[bg.shape[0]//2-vis.shape[0]//2:bg.shape[0]//2+vis.shape[0]//2, bg.shape[1]//2-vis.shape[1]//2:bg.shape[1]//2+vis.shape[1]//2] = vis
    cv2.imshow("display", bg)

    
    #show the comparison images for 5 seconds, then
    #close the windows and go back to listening to the Arduino
    cv2.waitKey(5000)
    
    siz = (vis.shape[1], vis.shape[0])  # image size as (width, height)

    #thumbnail = cv2.cv.CreateImage( ( siz[0] / 10, siz[1] / 10), 8, 1)
    newsz = (siz[0]//2, siz[1]//2)
    print("newsz", newsz)
    thumbnail = cv2.resize(vis, newsz)
    # show the original painting

    filename = paintsOriginalPath+ "/" +str(p_label) +"/" + str(p_label)+ ".jpg"
    im = cv2.imread(filename, cv2.IMREAD_COLOR)
    #place the comparison of the two faces in the top-left corner
    im[0:thumbnail.shape[0], 0:thumbnail.shape[1],0] = thumbnail
    im[0:thumbnail.shape[0], 0:thumbnail.shape[1],1] = thumbnail
    im[0:thumbnail.shape[0], 0:thumbnail.shape[1],2] = thumbnail
    cv2.imshow('display',im)
    cv2.waitKey(15000)

    #cv2.destroyAllWindows()
    scanning = True
    ser = serial.Serial(serialPort, 9600)
    cv2.imshow('display',home)
    cv2.waitKey(1)
Example #32
model_params = {
    'convolutional_size_params': [["conv1", [1, 10, 32], 'conv'],
                                  ["pool1", [5, 5, 5, 5], 'pool'],
                                  ["conv2", [1, 10, 32], 'conv'],
                                  ["pool2", [5, 5, 5, 5], 'pool']],
    'fc_size_params': [['h_fc1', 128], ['out', 2]],
    'l2': 0.001,
    'batch_size': 10,
    'valid_size': 10
}
training_params = {
    'dropout_keep_prob': 1.0,
    'max_grad': 0.01,
    'epochs': 10,
    'lr': 0.01,
    'train_files': [
        '/afs/csail.mit.edu/u/p/priyav/PAI_data/final_data/all_gis_islandviewer_iv4aa_data.csv.gz'
    ],
    'valid_files': [
        '/afs/csail.mit.edu/u/p/priyav/PAI_data/final_data/all_gis_islandviewer_iv4ag_data.csv.gz'
    ]
}

training(model_name, model_type, model_params, training_params)
Example #33
"""
Plot the total error for L constant, and W taking values chosen in constant.py
"""
from testing import testing
from training import training
from constant import WW, L, N, N_ite, learning_rate, NB_EPOCH, id_function
import numpy as np
import matplotlib.pyplot as plt

errormax = []
errorplot = []
errormin = []
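# average the test error over N_ite independent runs and track a one-sigma band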
for wite in WW:
    errorw = []
    for n in range(N_ite):
        training(wite, L, N, NB_EPOCH, learning_rate, id_function)
        errorw.append(testing(wite, L, N, NB_EPOCH, learning_rate,
                              id_function))
    means = np.mean(errorw)
    sigma = np.sqrt(np.var(errorw, ddof=1))
    errormax.append(means + sigma)
    errorplot.append(means)
    errormin.append(means - sigma)

plt.clf()
plt.figure()

fig, ax1 = plt.subplots()

ax1.set_xlabel("W", fontsize=14)
ax1.set_ylabel("Error", fontsize=14)
Example #34
    def planning(self, currLoc, utilObj, params):
        u = currLoc
        trainObj = training(self.dirName, self.ownNo)
        allV = utilObj.readMap()
        it = initialD(allV, currLoc)
        update = 0
        while update == 0:
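            # expand up to four hops out from u (e -> j -> h -> l), estimating and relaxing edge costs at each step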
            #it.Q.remove(u)
            lenPred = utilObj.findLenPredecessor(it, u)
            k = lenPred + 1
            self.B_curr[u] = utilObj.findNeighbor(u)
            f1 = os.path.join(self.dirName,
                              self.base1 + str(self.ownNo) + self.suffix)
            fid1 = open(f1, 'a')
            outtxt1 = 'AGV: {} U: {} B[u]\n'.format(self.ownNo, u)
            fid1.write(outtxt1)
            fid1.close()
            fid1 = open(f1, 'a')
            np.savetxt(fid1, self.B_curr[u], delimiter=',')
            fid1.close()

            #print("Neighor of u", self.B_curr[u])
            if (len(self.B_curr[u]) > 0):
                for e in self.B_curr[u]:
                    if (k <= 1):
                        prevTask = currLoc
                    elif k > 1:  #      rootDic, k, u, utilObj, prevTask
                        prevTask = it.pi_v[u]
                    #print("K: ", k)
                    #print("PrevTask: ", prevTask)
                    estimatedCost = trainObj.computeCost(
                        rootDic=self.dirName,
                        noAGV=self.ownNo,
                        k=k,
                        u=u,
                        utilObj=utilObj,
                        prevTask=prevTask
                    )  #rootDic, noAGV, k, u, utilObj, prevTask
                    estimatedCost = estimatedCost.flatten()
                    f2 = os.path.join(
                        self.dirName,
                        self.base2 + str(self.ownNo) + self.suffix)
                    outtxt2 = 'AGV: {} U: {} to E: {} {} {} {}\n'.format(
                        self.ownNo, u, e, estimatedCost[0], estimatedCost[1],
                        estimatedCost[2])
                    fid2 = open(f2, 'a')
                    fid2.write(outtxt2)
                    fid2.close()
                    self.stateDict[k] = estimatedCost
                    utilObj.storeObs(k, e, estimatedCost)
                    sumCost = estimatedCost[0] + estimatedCost[1] + estimatedCost[2]
                    utilObj.relax(u, e, it, sumCost)  #correct relax
                    self.B_curr[e] = utilObj.findNeighbor(e)
                    fid1 = open(f1, 'a')
                    outtxt1 = 'AGV: {} E: {} B[e]\n'.format(self.ownNo, e)
                    fid1.write(outtxt1)
                    fid1.close()
                    fid1 = open(f1, 'a')
                    np.savetxt(fid1, self.B_curr[e], delimiter=',')
                    fid1.close()
                    if (len(self.B_curr[e]) > 0):
                        for j in self.B_curr[e]:
                            if (j not in self.B_curr[u]) and (j != currLoc):
                                print("j: ", j)
                                lenPred = utilObj.findLenPredecessor(it, e)
                                k_dash = lenPred + 1
                                prevTask = it.pi_v[e]
                                #print("K_dash: ", k_dash)
                                #print("PrevTask: ", prevTask)
                                estimatedCost = trainObj.computeCost(
                                    rootDic=self.dirName,
                                    noAGV=self.ownNo,
                                    k=k_dash,
                                    u=e,
                                    utilObj=utilObj,
                                    prevTask=prevTask
                                )  #(self.dirName, k, e, utilObj, prevTask)
                                estimatedCost = estimatedCost.flatten()
                                self.stateDict[k_dash] = estimatedCost
                                utilObj.storeObs(k_dash, j, estimatedCost)
                                f3 = os.path.join(
                                    self.dirName,
                                    self.base2 + str(self.ownNo) + self.suffix)
                                outtxt3 = 'AGV: {} E: {} to J: {} {} {} {}\n'.format(
                                    self.ownNo, e, j, estimatedCost[0],
                                    estimatedCost[1], estimatedCost[2])
                                fid3 = open(f3, 'a')
                                fid3.write(outtxt3)
                                fid3.close()
                                sumCost = estimatedCost[0] + estimatedCost[1] + estimatedCost[2]
                                utilObj.relax(e, j, it, sumCost)
                                self.B_curr[j] = utilObj.findNeighbor(j)
                                fid1 = open(f1, 'a')
                                outtxt1 = 'AGV: {} J: {} B[j]\n'.format(self.ownNo, j)
                                fid1.write(outtxt1)
                                fid1.close()
                                fid1 = open(f1, 'a')
                                np.savetxt(fid1, self.B_curr[j], delimiter=',')
                                fid1.close()
                                if (len(self.B_curr[j]) > 0):
                                    for h in self.B_curr[j]:
                                        if (h not in self.B_curr[u]) and (h not in self.B_curr[e]) and (h != currLoc):
                                            lenPred = utilObj.findLenPredecessor(it, h)
                                            k_ddash = lenPred + 1
                                            prevTask = it.pi_v[j]
                                            #print("K_ddash: ", k_ddash)
                                            #print("PrevTask: ", prevTask)
                                            estimatedCost = trainObj.computeCost(
                                                rootDic=self.dirName,
                                                noAGV=self.ownNo,
                                                k=k_ddash,
                                                u=j,
                                                utilObj=utilObj,
                                                prevTask=prevTask
                                            )  #(self.dirName, k, j, utilObj, prevTask)
                                            estimatedCost = estimatedCost.flatten()
                                            self.stateDict[k_ddash] = estimatedCost
                                            utilObj.storeObs(k_ddash, h, estimatedCost)
                                            f4 = os.path.join(
                                                self.dirName,
                                                self.base2 + str(self.ownNo) + self.suffix)
                                            outtxt4 = 'AGV: {} J: {} to H: {} {} {} {}\n'.format(
                                                self.ownNo, j, h, estimatedCost[0],
                                                estimatedCost[1], estimatedCost[2])
                                            fid4 = open(f4, 'a')
                                            fid4.write(outtxt4)
                                            fid4.close()
                                            sumCost = estimatedCost[0] + estimatedCost[1] + estimatedCost[2]
                                            utilObj.relax(j, h, it, sumCost)
                                            self.B_curr[h] = utilObj.findNeighbor(h)
                                            fid1 = open(f1, 'a')
                                            outtxt1 = 'AGV: {} H: {} B[h]\n'.format(self.ownNo, h)
                                            fid1.write(outtxt1)
                                            fid1.close()
                                            fid1 = open(f1, 'a')
                                            np.savetxt(fid1, self.B_curr[h], delimiter=',')
                                            fid1.close()
                                            if (len(self.B_curr[h]) > 0):
                                                for l in self.B_curr[h]:
                                                    if (l not in self.B_curr[u]) and (l not in self.B_curr[e]) and (l not in self.B_curr[j]) and (l != currLoc):
                                                        lenPred = utilObj.findLenPredecessor(it, h)
                                                        k_dddash = lenPred + 1
                                                        prevTask = it.pi_v[h]
                                                        #print("K_dddash: ", k_dddash)
                                                        #print("PrevTask: ", prevTask)
                                                        estimatedCost = trainObj.computeCost(
                                                            rootDic=self.dirName,
                                                            noAGV=self.ownNo,
                                                            k=k_dddash,
                                                            u=h,
                                                            utilObj=utilObj,
                                                            prevTask=prevTask
                                                        )  #(self.dirName, k, h, utilObj, prevTask)
                                                        estimatedCost = estimatedCost.flatten()
                                                        self.stateDict[k_dddash] = estimatedCost
                                                        utilObj.storeObs(k_dddash, l, estimatedCost)
                                                        f5 = os.path.join(
                                                            self.dirName,
                                                            self.base2 + str(self.ownNo) + self.suffix)
                                                        outtxt5 = 'AGV: {} H: {} to L: {} {} {} {}\n'.format(
                                                            self.ownNo, h, l, estimatedCost[0],
                                                            estimatedCost[1], estimatedCost[2])
                                                        fid5 = open(f5, 'a')
                                                        fid5.write(outtxt5)
                                                        fid5.close()
                                                        sumCost = estimatedCost[0] + estimatedCost[1] + estimatedCost[2]
                                                        utilObj.relax(h, l, it, sumCost)
                                            else:
                                                break
                                else:
                                    break
                    else:
                        break
            else:
                break

            self.taskSequence = utilObj.findTaskPath(u, it)
            print("Tasksequence", self.taskSequence)
            f6 = os.path.join(self.dirName,
                              self.base3 + str(self.ownNo) + self.suffix)
            fid6 = open(f6, 'a')
            # self.fid1.write(outtxt1)
            np.savetxt(fid6, self.taskSequence, delimiter=',')
            fid6.close()
            self.lenT = len(self.taskSequence)
            self.endTask = self.taskSequence[self.lenT - 1]
            update = 1

            print("Len of stateDict", len(self.stateDict))
            return self.stateDict, self.lenT