Example #1
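A from-scratch training loop for a binary graph classifier: utils.BatchGenerator yields minibatches, Adam applies the gradients of a binary cross-entropy loss, and metrics are printed whenever the validation loss reaches a new minimum. GNNModel, BinaryCrossEntropy, Adam, utils, and accuracy are project-local helpers not shown here.

import numpy as np  # needed for np.mean / np.array below
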
def train():
    graphs, labels = load_data("datasets/train")
    train_inputs, train_targets, val_inputs, val_targets = utils.split_train_val(
        graphs, labels, val_rate=0.3)

    model = GNNModel(8)
    loss_func = BinaryCrossEntropy()
    optimizer = Adam()
    batch_generator = utils.BatchGenerator(batch_size=32)

    min_loss = float("inf")
    for epoch in range(50):
        print(f"Epoch {epoch + 1}")

        train_losses = []
        for inputs, targets in batch_generator.generator(
                train_inputs, train_targets):
            train_loss, loss_grad = loss_func(model,
                                              inputs,
                                              targets,
                                              is_grad=True)
            optimizer.update(model, loss_grad)

            train_losses.append(train_loss)

        train_mean_loss = np.mean(train_losses)
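        # accuracy over the full training set, one prediction per graph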
        pred = np.array([model.predict(input_)
                         for input_ in train_inputs]).squeeze()
        train_accuracy = accuracy(pred, train_targets)

        val_losses = []
        for inputs, targets in batch_generator.generator(
                val_inputs, val_targets):
            val_loss, _ = loss_func(model, inputs, targets, is_grad=False)
            val_losses.append(val_loss)

        val_mean_loss = np.mean(val_losses)
        pred = np.array([model.predict(input_)
                         for input_ in val_inputs]).squeeze()
        val_accuracy = accuracy(pred, val_targets)

        if val_mean_loss < min_loss:
            min_loss = val_mean_loss
            print(
                f"Train loss: {train_mean_loss}\tTrain accuracy: {train_accuracy}"
            )
            print(
                f"Validation loss: {val_mean_loss}\tValidation accuracy: {val_accuracy}"
            )
            print("")
Example #2
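Fine-tunes a pretrained Darknet-19 YOLO model with Keras fit_generator, resuming from epoch 3 and checkpointing weights whenever the validation loss improves; M, Y, and utils are project-local modules.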
def train(data_file):
    EPOCHS_PERIOD = 1
    BATCH_SIZE = 32
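    # one epoch sweeps the full 41128-example training set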
    STEPS_PER_EPOCH = 41128 // BATCH_SIZE
    model = M.yolo_darknet19(input_shape=(416, 416, 3), output_depth=5)
    model.load_weights('darknet19_weights_full.h5')
    optimizer = Adam(lr=1e-3,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-08,
                     decay=0.0)
    model.compile(optimizer=optimizer, loss=Y.yolo_loss)

    checkpoint = ModelCheckpoint('darknet19_weights.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 period=EPOCHS_PERIOD,
                                 save_weights_only=True,
                                 save_best_only=True)

    tensorboard = TensorBoard('logs',
                              histogram_freq=EPOCHS_PERIOD,
                              write_graph=True,
                              write_images=True,
                              write_grads=True)

    model.fit_generator(generator=utils.BatchGenerator('data.h5', BATCH_SIZE),
                        steps_per_epoch=STEPS_PER_EPOCH,
                        epochs=128,
                        verbose=1,
                        shuffle=True,
                        max_queue_size=16,
                        validation_data=utils.load_data('data.h5', 300, 4),
                        callbacks=[checkpoint, tensorboard],
                        initial_epoch=3)

    return model
Example #3
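TF1 session setup for a DATS model trained on multiple source domains: one utils.BatchGenerator per source domain, another for the target domain, and feature extraction via utils.GetDataPred to compute class means with model.GetMus.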
ratio_train = [None] * len(sources)
for key in sources:
    ratio_train[sources[key]] = ratio[key]['train']


tf.reset_default_graph()
graph = tf.get_default_graph()
model = DATS(options)
sess = tf.Session(graph=graph, config=tf.ConfigProto(gpu_options=gpu_options))
tf.global_variables_initializer().run(session=sess)

record = []
gen_source_batches = []
for key in sources:
    gen_source_batches.append(utils.BatchGenerator(
        [datasets[key]['train']['images'],
         datasets[key]['train']['labels'],
         datasets[key]['train']['domains']], options['batch_size']))
target_key = list(targets.keys())[0]
gen_target_batch = utils.BatchGenerator(
    [datasets[target_key]['train']['images'],
     datasets[target_key]['train']['labels'],
     datasets[target_key]['train']['domains']], options['num_target'])
save_path = './Result/DATS_' + target_key + '/'
if not os.path.exists(save_path):
    os.makedirs(save_path)


best_valid = -1
best_acc = -1
d_pred = None

print('Training...')
source_features = utils.GetDataPred(sess, model, 'feature', source_train['images'])
target_features = utils.GetDataPred(sess, model, 'feature', target_test['images'])
source_mus, target_mu = model.GetMus(np.concatenate([source_features, target_features], 0),
                                    source_train['labels'],
Example #4
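A steering-angle regression pipeline for Udacity driving data: angles are min-max scaled to [-1, 1], utils.BatchGenerator instances feed Keras fit_generator for training and validation, and loss curves are plotted afterwards.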
def main():
    which = ['simulator', 'real'][1]  # dataset switch
    model_name = ['nvidia', 'darknet53', ''][1]

    checkpoint_path, data, n_train_samples = None, None, None
    if which == 'simulator':
        # n_total_samples = 34386
        n_train_samples = 27510  # 6876 test
        checkpoint_path = '/media/aayush/Other/beta_simulator_linux/checkpoints/weights.{epoch:02d}-{val_loss:.2f}'
        data = pd.read_csv(
            '/media/aayush/Other/beta_simulator_linux/driving_log_mega.csv')
    elif which == 'real':
        # n_total_samples = 28382
        n_train_samples = 22706  #todo
        checkpoint_path = '/media/aayush/Other/Udacity Data Real/CH2_002/output/checkpoints_real/' + model_name \
                          + '/weights.{epoch:02d}-{val_loss:.2f}'
        data = pd.read_csv(
            '/media/aayush/Other/Udacity Data Real/CH2_002/output/filtered_only_center.csv'
        )

    batch_size = 50
    image_height = 105  # alternatives: 140, 66
    image_width = 240  # alternatives: 320, 200

    # hyperparameters:
    learning_rate = 0.001  # todo: increased; previous value was 0.0001
    n_epochs = 50

    image_paths = data['img'].values
    angles = data['angle'].values
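    # min-max scale the steering angles to [-1, 1]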
    angles = 2 * ((angles - np.min(angles)) /
                  (np.max(angles) - np.min(angles))) - 1

    X_train = image_paths[0:n_train_samples]
    y_train = angles[0:n_train_samples]

    X_test = image_paths[n_train_samples:]
    y_test = angles[n_train_samples:]

    model = build_model(batch_size, image_height, image_width, model_name)
    model.summary()
    model.compile(optimizer=tfk.optimizers.Adam(lr=learning_rate),
                  loss='mean_squared_error',
                  metrics=['accuracy'])  # note: accuracy is not informative for regression

    train_batch_generator = utils.BatchGenerator(X_train, y_train, batch_size,
                                                 True, image_height,
                                                 image_width)

    # bx, by = train_batch_generator.__getitem__(22)
    # img, angle = bx[0], by[0]
    # img = img.astype('uint8')
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # plt.imshow(img)
    # print(angle)
    # plt.show()

    val_batch_generator = utils.BatchGenerator(X_test, y_test, batch_size,
                                               False, image_height,
                                               image_width)

    ckpt_callback = tfk.callbacks.ModelCheckpoint(checkpoint_path,
                                                  save_weights_only=True,
                                                  verbose=1,
                                                  monitor='val_loss',
                                                  save_best_only=True,
                                                  mode='min')

    tensorboard_callback = tfk.callbacks.TensorBoard(
        log_dir='/media/aayush/Other/Udacity Data Real/CH2_002/output/' +
        model_name + '_logs',
        histogram_freq=0,
        batch_size=batch_size,
        write_graph=True,
        write_grads=True,
        update_freq='batch')
    history = model.fit_generator(
        generator=train_batch_generator,
        epochs=n_epochs,
        verbose=2,
        validation_data=val_batch_generator,
        validation_freq=1,
        workers=8,
        use_multiprocessing=True,
        shuffle=True,
        callbacks=[ckpt_callback, tensorboard_callback])

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
Example #5

    def DoTrain(self):
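        # Fine-tune a YOLOv3-style detector: load previous weights, strip
        # annotations flagged 'difficult', then train in rounds with a save
        # after each round.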
        #self.train_model.load_weights("../data/voc2012_2/train/notop.h5", by_name=True)
        self.train_model.load_weights("epoch_9.h5", by_name=True)

        learning_rate = 1e-4
        optimizer = Adam(lr=learning_rate, clipnorm=0.001)
        self.train_model.compile(loss=self.dummy_loss, optimizer=optimizer)

        # remove objects flagged 'difficult', then drop instances with no objects left
        for inst_idx in range(len(self.train_ints) - 1, -1, -1):
            inst = self.train_ints[inst_idx]
            for obj_idx in range(len(inst['object']) - 1, -1, -1):
                obj = inst['object'][obj_idx]
                if obj['difficult'] == '1':
                    del self.train_ints[inst_idx]['object'][obj_idx]
            if not inst['object']:
                del self.train_ints[inst_idx]
        for inst_idx in range(len(self.valid_ints) - 1, -1, -1):
            inst = self.valid_ints[inst_idx]
            for obj_idx in range(len(inst['object']) - 1, -1, -1):
                obj = inst['object'][obj_idx]
                if obj['difficult'] == '1':
                    del self.valid_ints[inst_idx]['object'][obj_idx]
            if not inst['object']:
                del self.valid_ints[inst_idx]

        train_generator = utils.BatchGenerator(
            instances=self.train_ints,
            anchors=self.anchors,
            labels=self.labels,
            downsample=32,  # ratio of network input size to output size (32 for YOLOv3)
            max_box_per_image=self.max_box_per_image,
            batch_size=int(self.batch_size),
            min_net_size=416 - 64,
            max_net_size=416 + 64,
            shuffle=True,
            jitter=0.3,
            norm=self.normalize)
        valid_generator = utils.BatchGenerator(
            instances=self.valid_ints,
            anchors=self.anchors,
            labels=self.labels,
            downsample=32,  # ratio of network input size to output size (32 for YOLOv3)
            max_box_per_image=self.max_box_per_image,
            batch_size=int(self.batch_size),
            min_net_size=416,
            max_net_size=416,
            shuffle=True,
            jitter=0.0,
            norm=self.normalize)

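        # ten rounds of ten epochs, saving a weight snapshot after each round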
        for i in range(10):
            self.train_model.fit_generator(
                generator=train_generator,
                steps_per_epoch=len(train_generator),
                epochs=10,
                verbose=2,
                validation_data=valid_generator)
            self.train_model.save('epoch_' + str(i) + '.h5')
Example #6
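TF1 setup for a character-level RNN language model: utils.BatchGenerator loads the Korean corpus with character- or syllable-level text modeling, and gradients are clipped by global norm before the Adam update.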
flags.DEFINE_string('text_modeling', 'chr',
                    'chr: character-based, syl: syllable-based')
flags.DEFINE_string('train_dir', 'data/korean-english-park.train.ko',
                    'training dataset')
flags.DEFINE_string('save_dir', 'save/model', 'path to save the trained model')
flags.DEFINE_string('load_dir', None, 'continue learning from this model')
flags.DEFINE_string('log_dir', 'log', 'directory for TensorBoard logs')
flags.DEFINE_float('alpha', 1e-4, 'learning rate for Adam')
flags.DEFINE_float('grad_clip', 5., 'gradient clip')
flags.DEFINE_integer('hidden_size', 128, 'hidden size')
flags.DEFINE_integer('n_epochs', 50, 'number of epochs')
flags.DEFINE_integer('batch_size', 64, 'batch size')
flags.DEFINE_integer('seq_length', 64, 'truncated backprop sequence length')
args = flags.FLAGS

train_loader = utils.BatchGenerator(args.text_modeling, args.train_dir)

model = models.CHAR_RNN(args.hidden_size, train_loader.vocab_size)

optimizer = tf.train.AdamOptimizer(learning_rate=args.alpha)
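# clip gradients by global norm before applying them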
gradients, variables = zip(*optimizer.compute_gradients(model.loss))
gradients, _ = tf.clip_by_global_norm(gradients, args.grad_clip)
train_op = optimizer.apply_gradients(zip(gradients, variables))

sess = tf.Session()
writer = tf.summary.FileWriter(args.log_dir, sess.graph)
loss_log = tf.placeholder(tf.float32, name='loss_log')
loss_summary = tf.summary.scalar('loss_summary', loss_log)
sess.run(tf.global_variables_initializer())

saver = tf.train.Saver()