Example #1
# Assumed imports for this snippet (Keras 2.x / TF 1.x); `m`, IMAGE_HEIGHT and
# IMAGE_WIDTH come from the project's model.py.
import tensorflow as tf
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.utils import plot_model
from model import m, IMAGE_HEIGHT, IMAGE_WIDTH


def train(X_train, y_train, save_model='model.h5'):
    """
    Train the model on the given training set and save it to disk.
    X_train: numpy array of training images
    y_train: numpy array of steering measurements
    save_model: string (filename for the saved model, default 'model.h5')

    return: None
    """

    # Hyperparameters
    batch_size = 32
    epochs = 30
    learning_rate = 0.001

    # Loading model from model.py
    model = m(input_height=IMAGE_HEIGHT, input_width=IMAGE_WIDTH)

    # Plot model as image
    plot_model(model,
               to_file='model_plot.png',
               show_shapes=True,
               show_layer_names=True)

    # If a trained model already exists, load its weights and continue training.
    # (tf.gfile.Exists is the TF 1.x spelling; TF 2.x uses tf.io.gfile.exists.)
    if tf.gfile.Exists(save_model):
        model.load_weights(save_model)
    model.compile(loss='mse', optimizer=Adam(learning_rate))

    # Save only the model that performs best on the validation set.
    # These callbacks are passed to the model.fit call below.
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  patience=5,
                                  verbose=1,
                                  mode='min')
    mcp_save = ModelCheckpoint(save_model,  # was hardcoded 'model.h5'; honor the argument
                               save_best_only=True,
                               monitor='val_loss',
                               mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=7,
                                       verbose=1,
                                       min_delta=1e-4,  # named 'epsilon' in older Keras
                                       mode='min')

    # Train the model
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              callbacks=[earlyStopping, mcp_save, reduce_lr_loss],
              validation_split=0.2,
              shuffle=True)

    return
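
For context: the `m` that Example #1 imports from model.py is presumably a Keras
model builder. A minimal hypothetical sketch is below; only the signature comes
from the snippet, while the layer stack and image dimensions are assumptions.

# model.py (hypothetical sketch)
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense

IMAGE_HEIGHT, IMAGE_WIDTH = 66, 200  # placeholder dimensions

def m(input_height=IMAGE_HEIGHT, input_width=IMAGE_WIDTH):
    """Build a small regression CNN with a single steering-angle output."""
    model = Sequential([
        Conv2D(24, (5, 5), strides=(2, 2), activation='relu',
               input_shape=(input_height, input_width, 3)),
        Flatten(),
        Dense(50, activation='relu'),
        Dense(1),  # steering angle (regression, hence the mse loss above)
    ])
    return model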
Example #2

# Assumed imports for this snippet; `m` is the project's PyTorch model class.
import torch
from model import m


### read train & test inputs and outputs
def read_input():
    data_inputs = []
    data_outputs = []
    with open("../data/data.txt") as f:
        content = f.readlines()
        for i in content:
            i = i.strip("\n")
            i = i.split()
            i = [float(x) for x in i]
            temp = []
            temp_box_x = int(i[4]) - int(i[2])
            temp_box_y = int(i[5]) - int(i[3])
            temp.append(i[0])
            temp.append(min(temp_box_x, temp_box_y))
            data_outputs.append(i[1:2])
            data_inputs.append(temp)
    return data_inputs, data_outputs


if __name__ == '__main__':
    model = m()  # instantiate under a new name to avoid shadowing the class `m`
    model.load_state_dict(torch.load('model_1.pt'))
    model.eval()  # inference mode (disables dropout/batch-norm updates)
    train_inputs, train_outputs = read_input()
    for i in range(len(train_inputs)):
        # assuming the model's forward expects a 1-D float tensor
        print(model(torch.tensor(train_inputs[i])))
        print(train_outputs[i])
        print("****")
Example #3
    # Assumed imports for this snippet: csv, boto3, StringIO (from io),
    # RandomForestRegressor and GridSearchCV (from sklearn).
    rf_param_grid = {  # opening line reconstructed; the snippet starts mid-dict
        'max_features': [0.33, 0.5, 'auto'],
        'min_samples_leaf': [15, 45, 75],
        'n_estimators': [10000],
        'oob_score': [True],
        'n_jobs': [-1]
    }

    gb_param_grid = {
        'min_samples_split': [1000, 1500, 2000],
        'min_samples_leaf': [15, 45, 75],
        'max_depth': [4, 5, 7],
        'max_features': ['sqrt'],
        'subsample': [0.8],
        'n_estimators': [10000]
    }
    model = m()

    gsearch_rf = GridSearchCV(RandomForestRegressor(), rf_param_grid, cv=10)
    gsearch_rf.fit(X, y)
    s3 = boto3.client('s3')

    with StringIO() as f:
        wr = csv.writer(f)

        # cv_results_ is a dict of parallel arrays; write the keys as a header
        # row, then one row per parameter setting. (Passing the dict straight
        # to writerows would only write its keys, one character per cell.)
        data = gsearch_rf.cv_results_
        wr.writerow(data.keys())
        wr.writerows(zip(*data.values()))
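
        # Upload sketch (not in the original snippet): push the CSV to S3.
        # Bucket and key names here are placeholders, not from the source.
        s3.put_object(Bucket='my-results-bucket',
                      Key='cv_results.csv',
                      Body=f.getvalue())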
Example #4
    parser.add_argument(  # opening lines reconstructed; the snippet starts mid-call
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help=
        'Path to image folder. This is where the images from the run will be saved.'
    )
    args = parser.parse_args()

    # Check that the Keras version the model was built with matches the local
    # one (assumes `from keras import __version__ as keras_version` at the top
    # of the file, which the snippet does not show).
    f = h5py.File(args.model, mode='r')
    model_version = f.attrs.get('keras_version')
    keras_version = str(keras_version).encode('utf8')

    if model_version != keras_version:
        print('You are using Keras version ', keras_version,
              ', but the model was built using ', model_version)

    model = m(IMAGE_HEIGHT, IMAGE_WIDTH)
    model.load_weights(args.model)

    if args.image_folder != '':
        print("Creating image folder at {}".format(args.image_folder))
        if not os.path.exists(args.image_folder):
            os.makedirs(args.image_folder)
        else:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        print("RECORDING THIS RUN ...")
    else:
        print("NOT RECORDING THIS RUN ...")

    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
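
    # Deployment sketch (not in the original snippet): serve the wrapped app
    # with eventlet. Port 4567 is an assumption (the usual simulator default).
    import eventlet
    import eventlet.wsgi
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)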
Example #5
            i = i.split()
            i = [float(x) for x in i]
            temp = []
            temp_box_x = int(i[4]) - int(i[2])
            temp_box_y = int(i[5]) - int(i[3])
            temp.append(i[0])
            temp.append(min(temp_box_x, temp_box_y))
            data_outputs.append(i[1:2])
            data_inputs.append(temp)
    return data_inputs, data_outputs


## TODO: add more data: velocity, omega, past position
if __name__ == '__main__':
    # Assumed imports for this snippet: torch, torch.optim as optim, time,
    # tqdm, and the model class `m` from the project.
    train_inputs, train_outputs = read_input()
    model = m()  # instantiate under a new name to avoid shadowing the class `m`
    model.load_state_dict(torch.load('model_1.pt'))  # warm-start from a checkpoint
    optimizer = optim.Adam(model.parameters(), lr=0.005)
    minibatch_size = 3
    num_minibatches = len(train_inputs) // minibatch_size

    for epoch in range(30):
        # Training
        print("Training")
        # Put the model in training mode
        model.train()
        start_train = time.time()

        for group in tqdm(range(num_minibatches)):
            total_loss = None
            optimizer.zero_grad()
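            # Continuation sketch (not in the original snippet): accumulate the
            # loss over the minibatch, then backpropagate and step. MSE and the
            # tensor conversion are assumptions about the model's interface.
            for j in range(group * minibatch_size, (group + 1) * minibatch_size):
                pred = model(torch.tensor(train_inputs[j]))
                target = torch.tensor(train_outputs[j])
                loss = torch.nn.functional.mse_loss(pred, target)
                total_loss = loss if total_loss is None else total_loss + loss
            total_loss.backward()
            optimizer.step()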
Example #6
def main():
    FLAGS = create_flag()

    # gpu_list = define_gpu( FLAGS.gpu )
    gpu_list = [FLAGS.gpu]
    os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(map(str, gpu_list))
    print('Using GPU: %s' % gpu_list)

    if FLAGS.model == 'origin':
        from model import RNN as m
    elif FLAGS.model == 'onehot':
        from model import One_Hot as m
    elif FLAGS.model == 'cnn':
        from model import CNN as m
    elif FLAGS.model == 'sigmoid':
        from model import Sigmoid as m
    else:
        raise ValueError(FLAGS.model)

    M = m(
        FLAGS.batch_size,
        FLAGS.hidden_size,
        learning_rate=FLAGS.learning_rate,
        sequence_length=FLAGS.topk if FLAGS.topk else 1000,
        num_layer=FLAGS.layer,
        reuse=FLAGS.reuse,
    )
    print('Model Created')

    log_dir = "%s/%s" % (FLAGS.log_dir, time.strftime("%m_%d_%H_%M"))
    save_dir = os.path.join(log_dir, 'ckpts')
    if os.path.exists(log_dir):
        print('log_dir exist %s' % log_dir)
        exit(2)
    os.makedirs(save_dir)
    with open(log_dir + '/Flags.js', 'w') as f:
        json.dump(FLAGS.__flags, f, indent=4)
    print('Writing log to %s' % log_dir)

    if 'New' in FLAGS.data_path:
        VFILE = './data/New_Val.h5'
    else:
        VFILE = './data/Validate.h5'

    # Note: this snippet targets an early TF 1.x API; tf.train.SummaryWriter and
    # tf.initialize_all_variables were later renamed to tf.summary.FileWriter
    # and tf.global_variables_initializer.
    with tf.Session() as sess:
        writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tfetch = [
            M.global_step,
            M.loss,
            M.accuracy,
            M.train_op,
            M.train_summary,
            M.prediction,
            # M.prediction, M.right_label,
            # M.correct
        ]
        vfetch = [M.loss, M.accuracy, M.validate_summary]

        sess.run(tf.initialize_all_variables())

        running_acc = 0.0
        running_loss = 0.0
        for e in range(FLAGS.epoch):
            titer, tstep = prepare_data(FLAGS.batch_size,
                                        FLAGS.data_path,
                                        shuffle=True,
                                        topk=FLAGS.topk)

            print(tstep)

            for data in titer:
                gstep, loss, accuracy, _, sum_str, score = M.step(
                    sess, data, tfetch)

                running_acc += accuracy
                running_loss += loss.mean()
                writer.add_summary(sum_str, gstep)

                if gstep % 20 == 0:
                    print('%d E[%d] Acc: %.4f Loss: %.4f' %
                          (gstep, e, running_acc / 20.0, running_loss / 20.0))
                    running_acc = 0.0
                    running_loss = 0.0

                if (gstep - 1) % 5 == 0:
                    print('prediction')
                    print(score.mean())
                    print('\n\n')

                if gstep % FLAGS.eval_every == 0:

                    viter, vstep = prepare_data(FLAGS.batch_size,
                                                VFILE,
                                                shuffle=False,
                                                topk=FLAGS.topk)
                    vrunning_acc = 0.0
                    vrunning_loss = 0.0

                    for data in viter:
                        loss, accuracy, sum_str = M.step(sess, data, vfetch)
                        vrunning_acc += accuracy
                        vrunning_loss += loss.mean()
                        writer.add_summary(sum_str, gstep + data[0])

                    print('Evaluate Acc: %.4f Loss: %.4f' %
                          (vrunning_acc / vstep, vrunning_loss / vstep))
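                    # Sketch (not in the original snippet): save_dir is created
                    # above but never written to; a checkpoint after each
                    # evaluation would be the natural use, e.g. with a
                    # tf.train.Saver built before the loop:
                    # saver.save(sess, os.path.join(save_dir, 'model'), global_step=gstep)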