Example #1
import math

from keras.applications.resnet50 import ResNet50

# Project helpers assumed to be importable from the surrounding code:
# create_synth_imagenet (synthetic ImageNet-like data) and StagingAreaCallback
# (a Keras callback that feeds batches through a TensorFlow StagingArea).
# make_plain_model and SamplesPerSec are sketched further down.


def make_tensor_model(staging_area_callback, num_classes):
    # Build ResNet50 directly on the staging area's input tensor, so batches
    # are read from the staging area instead of being fed per step.
    model = ResNet50(input_tensor=staging_area_callback.input_tensor,
        classes=num_classes, weights=None)
    # feed_dict and fetches are forwarded to the backend training function,
    # so the staging area's extra ops run as part of every training step.
    model.compile(optimizer='sgd', loss='categorical_crossentropy',
        target_tensors=[staging_area_callback.target_tensor],
        feed_dict=staging_area_callback.feed_dict,
        fetches=staging_area_callback.extra_ops)
    return model
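
# A plain, non-pipelined baseline for comparison. This is a minimal sketch of
# what make_plain_model (used below) might look like; it is an assumption,
# since the real helper is defined elsewhere in the project.
def make_plain_model(num_classes):
    model = ResNet50(classes=num_classes, weights=None)
    model.compile(optimizer='sgd', loss='categorical_crossentropy')
    return model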

num_classes = 1000
dataset_size = 1024
batch_size = 32
epochs = 5

x_train, y_train = create_synth_imagenet(224, dataset_size)
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')

# round up so that the (possibly smaller) last batch still gets its own step
steps_per_epoch = int(math.ceil(len(x_train) / batch_size))
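
# SamplesPerSec is a project helper; the class below is only a minimal sketch
# (an assumption, not the original implementation) of what such a gauge might
# do: time every batch and print training throughput at the end of each epoch.
import time
from keras.callbacks import Callback

class SamplesPerSec(Callback):
    def __init__(self, batch_size):
        super(SamplesPerSec, self).__init__()
        self.batch_size = batch_size

    def on_epoch_begin(self, epoch, logs=None):
        self.batch_times = []

    def on_batch_begin(self, batch, logs=None):
        self._batch_start = time.time()

    def on_batch_end(self, batch, logs=None):
        self.batch_times.append(time.time() - self._batch_start)

    def on_epoch_end(self, epoch, logs=None):
        total_time = sum(self.batch_times)
        if total_time > 0:
            samples = self.batch_size * len(self.batch_times)
            print('%.1f samples/sec' % (samples / total_time))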

gauge = SamplesPerSec(batch_size)
staging_area_callback = StagingAreaCallback(x_train, y_train, batch_size)

print('training plain model:')
plain_model = make_plain_model(num_classes)
plain_model.fit(x_train, y_train, batch_size, epochs=epochs, callbacks=[gauge])

print('training pipelined model:')
pipelined_model = make_tensor_model(staging_area_callback, num_classes)
# Inferred completion: data comes from the staging area tensors, so fit() is
# called without x/y arrays and is driven by steps_per_epoch; the staging
# area callback keeps the next batch staged ahead of each step.
pipelined_model.fit(steps_per_epoch=steps_per_epoch, epochs=epochs,
                    callbacks=[staging_area_callback, gauge])
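
Both examples call a create_synth_imagenet helper that is not shown. A minimal sketch of what it might look like, assuming random pixel data and one-hot labels over 1000 ImageNet classes (the class count and exact signature are assumptions):

import numpy as np

def create_synth_imagenet(size, dataset_size, num_classes=1000):
    # Random images at the requested resolution, plus one-hot labels.
    x = np.random.rand(dataset_size, size, size, 3)
    labels = np.random.randint(0, num_classes, size=dataset_size)
    y = np.zeros((dataset_size, num_classes))
    y[np.arange(dataset_size), labels] = 1.0
    return x, y
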
Example #2
import argparse

from keras.applications.inception_v3 import InceptionV3
from keras.applications.resnet50 import ResNet50

# Project helpers assumed to be importable from the surrounding code:
# create_synth_imagenet and train.


def parse_args():
    # Only the --batch-size argument appears in the original excerpt; the
    # remaining arguments are reconstructed from the attributes used in
    # __main__ below, and their defaults and help texts are assumptions.
    parser = argparse.ArgumentParser(
        description='Benchmark Keras training on synthetic ImageNet-like data')
    parser.add_argument('--arch', choices=['inception3', 'resnet50'],
                        default='resnet50', help='Model architecture')
    parser.add_argument('--gpus', default=1, type=int, help='Number of GPUs')
    parser.add_argument('--parameter-server', help='Parameter server device')
    parser.add_argument('--method', help='Parallelization method')
    parser.add_argument('--epochs', default=1, type=int,
                        help='Number of epochs')
    parser.add_argument('-b',
                        '--batch-size',
                        default=32,
                        type=int,
                        help='Batch size')

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()

    print('architecture:', args.arch)
    print('number of GPUs:', args.gpus)
    print('parameter server:', args.parameter_server)
    print('parallelization method:', args.method)
    print('epochs:', args.epochs)
    print('batch_size:', args.batch_size)

    dataset_size = 1024

    # Generate synthetic data at the input resolution the chosen architecture
    # expects: 299x299 for Inception v3, 224x224 for ResNet50.
    if args.arch == 'inception3':
        create_model = InceptionV3
        X, y = create_synth_imagenet(299, dataset_size)

    elif args.arch == 'resnet50':
        create_model = ResNet50
        X, y = create_synth_imagenet(224, dataset_size)

    train(create_model, X, y, args.batch_size, args.epochs, args.gpus,
          args.parameter_server, args.method)
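
The train helper is also defined elsewhere in the project. A minimal sketch of one way it could be implemented, assuming simple data-parallel replication with keras.utils.multi_gpu_model and ignoring the parameter_server and method options:

from keras.utils import multi_gpu_model

def train(create_model, X, y, batch_size, epochs, gpus,
          parameter_server, method):
    # Build the chosen architecture from scratch (no pretrained weights).
    model = create_model(weights=None)
    if gpus > 1:
        # Replicate the model across GPUs; each replica processes a slice
        # of every batch and the results are merged on the CPU.
        model = multi_gpu_model(model, gpus=gpus)
    model.compile(optimizer='sgd', loss='categorical_crossentropy')
    model.fit(X, y, batch_size=batch_size, epochs=epochs)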