Example 1
import os
import time

import tensorflow as tf

# Assumes a TF1-style FLAGS object plus build_graph(), train_data_iterator()
# and test_data_helper() defined elsewhere in the project.
def train():
    with tf.Session() as sess:
        # initialization
        graph = build_graph()
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        # start the input-queue threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # log writer
        train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
        test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/val')

        # restore model
        if FLAGS.restore:
            ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
            if ckpt:
                saver.restore(sess, ckpt)
                print("restore from the checkpoint {}".format(ckpt))

        # training loop; the outer while restarts the data iterator until the
        # coordinator (or an OutOfRangeError from the input queue) stops it
        try:
            while not coord.should_stop():
                for step, (x_batch, y_batch) in enumerate(train_data_iterator()):
                    start_time = time.time()
                    feed_dict = {graph['images']: x_batch,
                                 graph['labels']: y_batch,
                                 graph['keep_prob']: 0.8,
                                 graph['is_training']: True}
                    train_opts = [graph['train_op'], graph['loss'], graph['merged_summary_op']]
                    _, loss_val, train_summary = sess.run(train_opts, feed_dict=feed_dict)

                    train_writer.add_summary(train_summary, step)
                    end_time = time.time()
                    print("the step {0} takes {1} loss {2}".format(step, end_time - start_time, loss_val))

                    # eval stage
                    if step % FLAGS.eval_steps == 0:
                        x_batch_test, y_batch_test = test_data_helper(128)
                        feed_dict = {graph['images']: x_batch_test,
                                     graph['labels']: y_batch_test,
                                     graph['keep_prob']: 1.0,
                                     graph['is_training']: False}
                        test_opts = [graph['accuracy'], graph['merged_summary_op']]
                        accuracy_test, test_summary = sess.run(test_opts, feed_dict=feed_dict)
                        test_writer.add_summary(test_summary, step)
                        print('===============Eval a batch=======================')
                        print('step {0} test accuracy: {1}'.format(step, accuracy_test))
                        print('===============Eval a batch=======================')
                    # save stage
                    if step % FLAGS.save_steps == 0 and step > FLAGS.min_save_steps:
                        saver.save(sess, os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name), global_step=step)
        except tf.errors.OutOfRangeError:
            print('==================Train Finished================')
            saver.save(sess, os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name), global_step=step)
        finally:
            coord.request_stop()
        coord.join(threads)
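
The loop indexes into `graph` by key, so `build_graph()` is assumed to return a dict of placeholders and ops. A minimal sketch of such a builder, with placeholder shapes and a toy network standing in for the unknown original architecture:

def build_graph():
    # Hypothetical shapes; the original network definition is not shown.
    images = tf.placeholder(tf.float32, [None, 64, 64, 3], name='images')
    labels = tf.placeholder(tf.int64, [None], name='labels')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    is_training = tf.placeholder(tf.bool, name='is_training')

    # toy network: flatten -> dropout -> linear classifier
    flat = tf.layers.flatten(images)
    flat = tf.nn.dropout(flat, keep_prob)
    logits = tf.layers.dense(flat, 10)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)

    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', accuracy)
    merged_summary_op = tf.summary.merge_all()

    return {'images': images, 'labels': labels, 'keep_prob': keep_prob,
            'is_training': is_training, 'train_op': train_op, 'loss': loss,
            'accuracy': accuracy, 'merged_summary_op': merged_summary_op}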
Example 2
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Flatten, Input
from keras.models import Model

# Assumes FLAGS, train_data_iterator(), test_data_helper() and the custom
# sigmoid_loss / captcha_accuracy functions are defined elsewhere.
def train():
    X_shape = (FLAGS.image_height, FLAGS.image_width, 3)
    X_tensor = Input(shape=X_shape)
    # VGG16 backbone (the original variable was misleadingly named `resnet`)
    vgg = VGG16(include_top=False,
                weights='imagenet',
                input_tensor=X_tensor,
                input_shape=X_shape,
                pooling=None)
    # one logit per (character slot, charset entry) pair
    flatten = Flatten()(vgg.output)
    output = Dense(FLAGS.charset_size * FLAGS.captcha_size)(flatten)
    model = Model(inputs=[X_tensor], outputs=[output])
    model.compile(optimizer='adam',
                  loss=sigmoid_loss,
                  metrics=[captcha_accuracy])

    for epoch in range(FLAGS.num_epochs):
        for i, (X_train, y_train) in enumerate(train_data_iterator()):
            X_train, y_train = np.array(X_train), np.array(y_train)
            loss = model.train_on_batch(X_train, y_train)
            print(f"epoch: {epoch} step: {i} loss: {loss}")

            if i % 100 == 0:
                X_test, y_test = test_data_helper()
                X_test, y_test = np.array(X_test), np.array(y_test)
                score = model.test_on_batch(X_test, y_test)
                print(f"score: {score}")
Example 3
import torch

# Assumes `net` (a trained torch.nn.Module) and test_data_helper() exist.
def predict():
    data, target = test_data_helper()
    data, target = torch.FloatTensor(data), torch.LongTensor(target)
    net.eval()  # inference mode: disable dropout / batch-norm updates
    with torch.no_grad():  # no gradients needed for evaluation
        output = net(data)
    pred = torch.argmax(output, 1)
    accuracy = torch.eq(pred, target).sum().item() / len(target)
    print(accuracy)
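
`predict()` relies on a module-level `net`. One way it could be restored from the `best.pth` checkpoint written by the training snippets below (the `Net` class is hypothetical):

net = Net()                                  # hypothetical model class
net.load_state_dict(torch.load('best.pth'))  # weights saved during training
predict()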
Example 4
import numpy as np
from keras.layers import Activation, Conv2D, Dense, Flatten, LeakyReLU, MaxPooling2D
from keras.models import Sequential
from keras.optimizers import Adam

# Assumes FLAGS, train_data_iterator() and test_data_helper() are defined elsewhere.
def train():
    # model: four CONV => LeakyReLU => POOL blocks, then a softmax classifier
    X_shape = (FLAGS.image_height, FLAGS.image_width, 3)
    model = Sequential()

    model.add(Conv2D(128, kernel_size=3, padding='same', strides=1, input_shape=X_shape))
    model.add(LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Conv2D(256, kernel_size=3, padding='same', strides=1))
    model.add(LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Conv2D(512, kernel_size=3, padding='same', strides=1))
    model.add(LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Conv2D(1024, kernel_size=3, padding='same', strides=1))
    model.add(LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # flatten the feature maps before the fully connected classifier
    model.add(Flatten())
    model.add(Dense(FLAGS.charset_size))
    model.add(Activation('softmax'))

    optimizer = Adam()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])


    for epoch in range(FLAGS.num_epochs):
        for i, (X_train, y_train) in enumerate(train_data_iterator()):
            X_train, y_train = np.array(X_train), np.array(y_train)
            loss = model.train_on_batch(X_train, y_train)
            print(f"epoch: {epoch} step: {i} loss: {loss}")

            if i % 100 == 0:
                X_test, y_test = test_data_helper()
                X_test, y_test = np.array(X_test), np.array(y_test)
                score = model.test_on_batch(X_test, y_test)
                print(f"score: {score}")

if __name__ == '__main__':
    train()
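
Each Keras example drains `train_data_iterator()` as a generator of (images, labels) batches. A minimal sketch of what such an iterator might look like (`train_images`, `train_labels` and the batching scheme are assumptions):

def train_data_iterator():
    # hypothetical: slice a pre-loaded dataset into FLAGS.batch_size batches
    for start in range(0, len(train_images), FLAGS.batch_size):
        yield (train_images[start:start + FLAGS.batch_size],
               train_labels[start:start + FLAGS.batch_size])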
Example 5
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Flatten, Input
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator

# Same model as Example 2, plus on-the-fly data augmentation.
def train():
    X_shape = (FLAGS.image_height, FLAGS.image_width, 3)
    X_tensor = Input(shape=X_shape)
    # VGG16 backbone (the original variable was misleadingly named `resnet`)
    vgg = VGG16(include_top=False,
                weights='imagenet',
                input_tensor=X_tensor,
                input_shape=X_shape,
                pooling=None)
    flatten = Flatten()(vgg.output)
    output = Dense(FLAGS.charset_size * FLAGS.captcha_size)(flatten)
    model = Model(inputs=[X_tensor], outputs=[output])
    model.compile(optimizer='adam',
                  loss=sigmoid_loss,
                  metrics=[captcha_accuracy])

    aug = ImageDataGenerator(rotation_range=10,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             horizontal_flip=False,
                             fill_mode="nearest")

    for epoch in range(FLAGS.num_epochs):
        for i, (X_train, y_train) in enumerate(train_data_iterator()):
            X_train, y_train = np.array(X_train), np.array(y_train)
            # augment the whole batch once; shuffle=False keeps the images
            # aligned with their labels (the original drew a shuffled batch,
            # silently mismatching X_train and y_train)
            X_train = next(aug.flow(X_train, batch_size=len(X_train), shuffle=False))
            loss = model.train_on_batch(X_train, y_train)
            print(f"epoch: {epoch} step: {i} loss: {loss}")

            if i % 100 == 0:
                X_test, y_test = test_data_helper()
                X_test, y_test = np.array(X_test), np.array(y_test)
                score = model.test_on_batch(X_test, y_test)
                print(f"score: {score}")
Example 6
import numpy as np
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model

# The original snippet starts mid-function: the convolutional backbone that
# defines `img_input` and `x` is omitted. The signature below is inferred
# from the build_model(...) call further down.
def build_model(classes, include_top=True):
    # ... backbone omitted in the original ...
    if include_top:
        x = Flatten(name='flatten')(x)
        x = Dropout(0.05)(x)
        x = Dense(1024, activation='relu', name='fc2')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)

    model = Model(img_input, x, name='model')
    return model


model = build_model(classes=FLAGS.wordset_size)

model.compile(
    loss='categorical_crossentropy',
    optimizer='rmsprop',
    metrics=['accuracy'],
)

for epoch in range(FLAGS.num_epochs):
    for i, (X_train, y_train) in enumerate(train_data_iterator()):
        X_train, y_train = np.array(X_train), np.array(y_train)
        loss = model.train_on_batch(X_train, y_train)
        print(f"epoch: {epoch} step: {i} loss: {loss}")

        if i % 100 == 0:
            X_test, y_test = test_data_helper()
            X_test, y_test = np.array(X_test), np.array(y_test)
            score = model.test_on_batch(X_test, y_test)
            print(f"score: {score}")
            if score[1] > 0.5:  # test_on_batch returns [loss, accuracy]
                model.save('model.h5')
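
The checkpoint written by `model.save('model.h5')` stores architecture, weights and optimizer state, so it can be restored wholesale later (standard Keras API; the prediction line is illustrative):

from keras.models import load_model

model = load_model('model.h5')  # no custom_objects needed: stock loss/metrics
print(model.predict(np.array(X_test[:1])))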
Example 7
import torch

# Fragment of a training loop; assumes net, optimizer, loss_fn, best_acc,
# config and train_data_iterator() / test_data_helper() are defined in the
# omitted surrounding code.
for i, (input, target) in enumerate(train_data_iterator()):
    input = torch.FloatTensor(input).cuda(non_blocking=True)   # `async=` is a
    target = torch.LongTensor(target).cuda(non_blocking=True)  # syntax error on Python 3.7+
    # forward
    output = net(input)
    loss = loss_fn(output, target)  # target must be on the same device as output
    # backward
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # accuracy
    if (i + 1) % 100 == 0:
        net.eval()
        data, label = test_data_helper()
        data = torch.FloatTensor(data).cuda()
        label = torch.FloatTensor(label).cuda()
        with torch.no_grad():
            output = net(data)
            batch_size = output.size(0)
            # labels are multi-hot over (captlen, charlen); score each
            # character slot by its argmax
            pred = torch.argmax(output.view(-1, config.captlen, config.charlen), dim=2)
            label = torch.argmax(label.view(-1, config.captlen, config.charlen), dim=2)
            accuracy = torch.eq(pred, label).sum().item() / (batch_size * config.captlen)
        if accuracy > best_acc:
            best_acc = accuracy
            torch.save(net.state_dict(), 'best.pth')  # nn.Module has no .save()
        net.train()  # back to training mode
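
The eval block reshapes labels to `(batch, config.captlen, config.charlen)`, i.e. one one-hot row per captcha character. A sketch of how such a target could be encoded (hypothetical helper; `alphabet` is an assumption):

import numpy as np

def encode_captcha(text, alphabet):
    # one one-hot row of len(alphabet) per character, flattened to match
    # the network's output layout
    target = np.zeros((len(text), len(alphabet)), dtype=np.float32)
    for row, ch in enumerate(text):
        target[row, alphabet.index(ch)] = 1.0
    return target.reshape(-1)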
Example 8
            # Fragment of a training loop; the truncated first line is
            # reconstructed from the identical conversion in the eval branch
            # below. net, optimizer, loss_fn, best_acc and the enclosing
            # epoch/batch loops come from the omitted code above.
            data, target = torch.FloatTensor(data).cuda(), torch.LongTensor(
                target).cuda()

            output = net(data)
            loss = loss_fn(output, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("Train epoch: {} batch: {} loss: {}".format(
                epoch_, batch_idx, loss))

            if batch_idx % 100 == 0:
                net.eval()
                with torch.no_grad():
                    data, target = test_data_helper()
                    data, target = torch.FloatTensor(
                        data).cuda(), torch.LongTensor(target).cuda()
                    output = net(data)
                    pred_t = target                  # ground-truth labels
                    pred_p = torch.argmax(output, 1)
                    num = pred_t.size(0)
                    correct = torch.sum(torch.eq(pred_t, pred_p))  # avoid shadowing built-in sum
                    accuracy = correct.item() / num
                    print('accuracy : {}'.format(accuracy))
                    if accuracy > best_acc:
                        # save the weights without moving the live model off
                        # the GPU (the original net.cpu() here would break
                        # the rest of training)
                        torch.save(net.state_dict(), 'best.pth')
                        best_acc = accuracy
                        print(
                            "save model. best accuracy is {}".format(best_acc))
                net.train()  # back to training mode