Example #1
    print("Output class: ", np.argmax(workspace.FetchBlob("softmax"), axis=1))
    print("Real class  : ", label)


if __name__ == '__main__':
    # NOTE: `args` (epochs, use_gpu) comes from an argument-parsing
    # section elided from this snippet.

    # 1. Global init (sets the log level) & device option: CUDA or CPU
    core.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    if args.use_gpu:
        device_opts = core.DeviceOption(caffe2_pb2.CUDA, 0)
    else:
        device_opts = core.DeviceOption(caffe2_pb2.CPU, 0)

    # 2. Prepare data:
    # try to download & extract the dataset,
    # then shuffle it and apply mean/std normalization.
    train_x, train_y, test_x, test_y = prepare_data()
    train_x, test_x = normalization(train_x, test_x)

    # 3. Start training & save pb files.
    do_train(train_x,
             train_y,
             test_x,
             test_y,
             epochs=args.epochs,
             device_opts=device_opts,
             use_gpu=args.use_gpu)

    # 4. Run a test if needed
    do_test(test_x, test_y, device_opts)
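
The `normalization` helper called in step 2 above is not shown in this snippet; a minimal sketch, assuming plain per-feature mean/std scaling with statistics computed on the training set only (the name and behavior are inferred, not from the source):

def normalization(train_x, test_x):
    # Fit mean/std on the training set only, then apply the same
    # statistics to the test set to avoid information leakage.
    mean = train_x.mean(axis=0)
    std = train_x.std(axis=0) + 1e-7  # guard against division by zero
    return (train_x - mean) / std, (test_x - mean) / std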
Example #2
import data_utility  # project-local helper, assumed importable
from tensorflow.keras.preprocessing.image import ImageDataGenerator


def train(model):
    batch_size = 64
    num_classes = 10
    epochs = 100
    x_train, y_train, x_test, y_test = data_utility.prepare_data()

    #     x_train = x_train.astype('float32')
    #     x_test = x_test.astype('float32')
    x_train, x_test = x_train / 255.0, x_test / 255.0
    print('x_train shape:', x_train.shape, 'x_test shape:', x_test.shape)
    #     y_train = utils.to_categorical(y_train, num_classes)
    #     y_test = utils.to_categorical(y_test, num_classes)

    # sgd = SGD(lr=0.0001, decay=1e-8, momentum=0.9, nesterov=True)
    # NOTE: 'categorical_crossentropy' expects one-hot labels; if
    # prepare_data() returns integer class labels, either re-enable the
    # to_categorical() calls above or use 'sparse_categorical_crossentropy'.
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    data_augmentation = True

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(x_train,
                  y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(x_test, y_test),
                  shuffle=True)
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by dataset std
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            zca_epsilon=1e-06,  # epsilon for ZCA whitening
            rotation_range=0,  # randomly rotate images by up to this many degrees (0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally
            height_shift_range=0.1,  # randomly shift images vertically
            shear_range=0.,  # set range for random shear
            zoom_range=0.,  # set range for random zoom
            channel_shift_range=0.,  # set range for random channel shifts
            # set mode for filling points outside the input boundaries
            fill_mode='nearest',
            cval=0.,  # value used for fill_mode = "constant"
            horizontal_flip=True,  # randomly flip images horizontally
            vertical_flip=False,  # randomly flip images vertically
            # set rescaling factor (applied before any other transformation)
            rescale=None,
            # set function that will be applied on each input
            preprocessing_function=None,
            # image data format, either "channels_first" or "channels_last"
            data_format=None,
            # fraction of images reserved for validation (strictly between 0 and 1)
            validation_split=0.0)

        # Compute quantities required for feature-wise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)

        # Fit the model on the batches generated by datagen.flow().
        # (fit_generator is deprecated in newer Keras releases, where
        # model.fit() accepts generators directly.)
        model.fit_generator(datagen.flow(x_train,
                                         y_train,
                                         batch_size=batch_size),
                            epochs=epochs,
                            validation_data=(x_test, y_test),
                            workers=4)
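
A minimal way to exercise `train`, assuming a toy Keras model; the architecture below is illustrative only, and `data_utility.prepare_data()` must still be importable:

from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(10, activation='softmax'),
])
train(model)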
Example #3
        if not os.path.exists(label_path):
            # Guard reconstructed from the truncated snippet; the exact
            # log message is assumed. Requires `import os`.
            tf.logging.warning('Could not find %s, ignoring example.',
                               label_path)
            continue

        try:
            tf_example = dict_to_tf_example(image_path, label_path)
            writer.write(tf_example.SerializeToString())
        except ValueError:
            tf.logging.warning('Invalid example: %s, ignoring.', example)

    writer.close()
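
# `dict_to_tf_example` is not defined in this snippet. A minimal sketch of
# what such a helper might look like (assumed, not from the source): read
# the raw image and label bytes and pack them into a tf.train.Example.
def dict_to_tf_example(image_path, label_path):
    with tf.gfile.GFile(image_path, 'rb') as f:
        encoded_image = f.read()
    with tf.gfile.GFile(label_path, 'rb') as f:
        encoded_label = f.read()
    return tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[encoded_image])),
        'label/encoded': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[encoded_label])),
    }))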


if __name__ == "__main__":

    # debug
    seq_list = load_data_names("/cvgl/group/GazeCapture/test")

    batch_size = len(seq_list)
    dataset_path = "/media/insfan/00028D8D000E9194/MPIIFaceGaze/MPIIFaceGaze/MPIIFaceGaze_fem64.npz"

    train_data = load_data(dataset_path)
    data_utility.prepare_data(train_data)

    # img_ch = 3
    # img_cols = 64
    # img_rows = 64
    #
    # test_batch = load_batch_from_names_random(seq_list, dataset_path, batch_size, 64, 64, 3)
    #
    # print("Loaded: {} data".format(len(test_batch[0][0])))