Example #1
        repeat_size=repeat_size,
        batch_size=batch_size,
        num_parallel_workers=num_parallel_workers,
        is_training=is_training)

    return cifar_ds


if __name__ == '__main__':
    args_opt = parse_args()
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args_opt.device_target)

    # download cifar10 dataset
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('cifar10')
    # build the network
    if args_opt.do_eval and args_opt.load_pretrained == 'hub':
        from tinyms import hub
        net = hub.load(args_opt.hub_uid, class_num=args_opt.num_classes)
    else:
        net = vgg16(class_num=args_opt.num_classes)
    net.update_parameters_name(prefix='huawei')
    model = Model(net)
    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # define the optimizer
    net_opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                       0.01, 0.9)
    model.compile(loss_fn=net_loss,
                  optimizer=net_opt,
                  # metrics completed here following the MNIST example below (assumption)
                  metrics={"Accuracy": Accuracy()})
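
    # Sketch (assumption): a typical continuation builds the training set with
    # the create_dataset helper defined above and launches training; the exact
    # arguments in the elided original may differ.
    ds_train = create_dataset(args_opt.dataset_path,
                              batch_size=args_opt.batch_size)
    model.train(args_opt.epoch_size, ds_train,
                dataset_sink_mode=args_opt.device_target != "CPU")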
Example #2
        mnist_ds,
        repeat_size=repeat_size,
        batch_size=batch_size,
        num_parallel_workers=num_parallel_workers)

    return mnist_ds


if __name__ == "__main__":
    args_opt = parse_args()
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args_opt.device_target)

    # download mnist dataset
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('mnist')
    # build the network
    net = lenet5()
    model = Model(net)
    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    # define the optimizer
    net_opt = Momentum(net.trainable_params(), 0.01, 0.9)
    model.compile(net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
    mnist_path = args_opt.dataset_path
    dataset_sink_mode = args_opt.device_target != "CPU"

    if args_opt.do_eval:  # for evaluation, users can use model.eval
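        # Sketch (assumption): the elided evaluation branch typically rebuilds
        # the test set, loads a trained checkpoint, and calls model.eval; the
        # helper name and checkpoint argument below are illustrative, mirroring
        # common MindSpore usage rather than this exact script.
        from mindspore.train.serialization import (load_checkpoint,
                                                   load_param_into_net)

        ds_eval = create_dataset(mnist_path, batch_size=batch_size)
        load_param_into_net(net, load_checkpoint(args_opt.checkpoint_path))
        acc = model.eval(ds_eval, dataset_sink_mode=dataset_sink_mode)
        print("Accuracy:", acc)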
Example #3
        json_dict['categories'].append(cat)

    anno_file = os.path.join(anno_dir, 'annotation.json')
    with open(anno_file, 'w') as f:
        json.dump(json_dict, f)
    return anno_file


if __name__ == '__main__':
    args_opt = parse_args()
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args_opt.device_target)

    # download voc dataset
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('voc')

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
    voc_path = args_opt.dataset_path
    dataset_sink_mode = args_opt.device_target != "CPU"

    if not args_opt.do_eval:  # for training, users can use model.train
        ds_train = create_dataset(voc_path, batch_size=batch_size)
        dataset_size = ds_train.get_dataset_size()
        # build the SSD300 network
        net = ssd300_mobilenetv2(class_num=args_opt.num_classes)
        # define the loss function
        if args_opt.device_target == "GPU":
            net.to_float(ts.float16)
        net = net_with_loss(net)
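        # Sketch (assumption): the elided continuation typically wraps the
        # loss network in a Model and starts training; the optimizer settings
        # are illustrative, and loss_fn is omitted because the loss is already
        # computed inside net_with_loss.
        net_opt = Momentum(filter(lambda x: x.requires_grad,
                                  net.get_parameters()),
                           0.01, 0.9)
        model = Model(net)
        model.compile(optimizer=net_opt)
        model.train(epoch_size, ds_train,
                    dataset_sink_mode=dataset_sink_mode)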
Example #4
def test_download_dataset_voc():
    download_dataset(dataset_name='voc', local_path='/tmp')

    assert os.path.exists('/tmp/voc/VOCdevkit/VOC2007')
Example #5
def test_download_dataset_cifar100():
    download_dataset(dataset_name='cifar100', local_path='/tmp')

    assert os.path.exists('/tmp/cifar100/cifar-100-bin/train.bin')
    assert os.path.exists('/tmp/cifar100/cifar-100-bin/test.bin')
Example #6
def test_download_dataset_cifar10():
    download_dataset(dataset_name='cifar10', local_path='/tmp')

    assert os.path.exists('/tmp/cifar10/cifar-10-batches-bin/batches.meta.txt')
Example #7
def test_download_dataset_mnist():
    download_dataset(dataset_name='mnist', local_path='/tmp')

    assert os.path.exists('/tmp/mnist/train')
    assert os.path.exists('/tmp/mnist/test')
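Examples #4 through #7 repeat one pattern: download a named dataset into /tmp and assert that a marker path exists on disk. In a pytest suite they could be collapsed with parametrize; the sketch below uses only the dataset names and paths shown above, and the test name, parameter layout, and import location are assumptions rather than part of the original tests.

import os

import pytest
from tinyms.data import download_dataset  # import path assumed from TinyMS docs


@pytest.mark.parametrize('name, marker', [
    ('mnist', 'mnist/train'),
    ('cifar10', 'cifar10/cifar-10-batches-bin/batches.meta.txt'),
    ('cifar100', 'cifar100/cifar-100-bin/train.bin'),
    ('voc', 'voc/VOCdevkit/VOC2007'),
])
def test_download_dataset(name, marker):
    # download into /tmp and check the expected on-disk marker
    download_dataset(dataset_name=name, local_path='/tmp')
    assert os.path.exists(os.path.join('/tmp', marker))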
Example #8
    train_ds = kaggle_display_advertising_ds.load_mindreocrd_dataset(
        usage='train', batch_size=batch_size)
    eval_ds = kaggle_display_advertising_ds.load_mindreocrd_dataset(
        usage='test', batch_size=batch_size)

    return train_ds, eval_ds


if __name__ == "__main__":
    args_opt = parse_args()
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args_opt.device_target)

    # download kaggle display advertising dataset
    if not args_opt.dataset_path:
        args_opt.dataset_path = download_dataset('kaggle_display_advertising')
    else:
        args_opt.dataset_path = os.path.join(args_opt.dataset_path,
                                             "kaggle_display_advertising")

    epoch_size = args_opt.epoch_size
    batch_size = args_opt.batch_size
    dataset_path = args_opt.dataset_path
    dataset_sink_mode = args_opt.device_target != "CPU"
    checkpoint_dir = args_opt.checkpoint_dir if args_opt.checkpoint_dir is not None else "."

    # create train and eval dataset
    train_ds, eval_ds = create_dataset(data_path=dataset_path,
                                       batch_size=batch_size)
    # build base network
    data_size = train_ds.get_dataset_size()
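
Example #8 stops after computing data_size, the number of batches per training epoch. A hedged sketch of what such scripts commonly do with it, using MindSpore's standard checkpoint callbacks; the checkpoint prefix and keep count are illustrative, not this script's actual continuation:

    # Sketch (assumption): data_size commonly sizes the checkpoint interval,
    # i.e. save one checkpoint per epoch into checkpoint_dir.
    from mindspore.train.callback import CheckpointConfig, ModelCheckpoint

    ckpt_cfg = CheckpointConfig(save_checkpoint_steps=data_size,
                                keep_checkpoint_max=10)
    ckpt_cb = ModelCheckpoint(prefix='wide_deep', directory=checkpoint_dir,
                              config=ckpt_cfg)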