Example #1
parser.add_argument('--optimizer_name',
                    default='rmsprop',
                    help="Name of optimizer (rmsprop or sgd)")
parser.set_defaults(batch_size=4, num_iterations=10000000, iter_interval=2000)
args = parser.parse_args()

# Set the random seed
np.random.seed(1)
# Number of outputs of last layer.
ax.Y.length = 1000
ax.N.length = args.batch_size

# Build AEON data loader objects
train_set, valid_set = make_aeon_loaders(
    train_manifest=args.train_manifest_file,
    valid_manifest=args.valid_manifest_file,
    batch_size=args.batch_size,
    train_iterations=args.num_iterations,
    dataset='i1k',
    datadir=args.image_dir)
inputs = train_set.make_placeholders(include_iteration=True)

# Input size is 299 x 299 x 3
image_size = 299

# Build the network (renamed from `inception` to avoid rebinding the imported module)
inception_model = inception.Inception(mini=args.mini)

# Declare the optimizer
if args.optimizer_name == 'sgd':
    learning_rate_policy = {
        'name': 'schedule',
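The listing breaks off inside the policy dict. As a rough guide, here is a minimal sketch of how such a 'schedule' policy is typically completed and handed to an optimizer in ngraph's neon frontend; the schedule steps, rates, and the GradientDescentMomentum call below are illustrative assumptions, not taken from the original script.

from ngraph.frontends.neon import GradientDescentMomentum

# Hedged sketch: the schedule steps and rates below are placeholder values.
learning_rate_policy = {
    'name': 'schedule',
    'schedule': [10000, 20000],  # iterations at which the rate decays (assumed)
    'gamma': 0.1,                # multiplicative decay factor (assumed)
    'base_lr': 0.01}             # starting learning rate (assumed)
optimizer = GradientDescentMomentum(learning_rate=learning_rate_policy,
                                    momentum_coef=0.9,
                                    iteration=inputs['iteration'])

The iteration placeholder created by make_placeholders(include_iteration=True) is what lets a schedule policy like this pick the right rate at each step.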
Example #2
    parser.add_argument(
        '--stage_depth',
        type=int,
        default=2,
        help='depth of each stage (network depth will be 9n+2)')
    parser.add_argument('--use_aeon',
                        action='store_true',
                        help='whether to use aeon dataloader')
    args = parser.parse_args()

    np.random.seed(args.rng_seed)

    # Create the dataloader
    if args.use_aeon:
        from data import make_aeon_loaders
        train_set, valid_set = make_aeon_loaders(args.data_dir,
                                                 args.batch_size,
                                                 args.num_iterations)
    else:
        from ngraph.frontends.neon import ArrayIterator  # noqa
        from ngraph.frontends.neon import CIFAR10  # noqa
        train_data, valid_data = CIFAR10(args.data_dir).load_data()
        train_set = ArrayIterator(train_data,
                                  args.batch_size,
                                  total_iterations=args.num_iterations)
        valid_set = ArrayIterator(valid_data, args.batch_size)

    # we need to ask the dataset to create an iteration
    # placeholder for our learning rate schedule
    inputs = train_set.make_placeholders(include_iteration=True)
    ax.Y.length = 10
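Past this point the full script would build the model and a loss; the iteration placeholder exists so the feed dict can tell a schedule-based optimizer the current step. A minimal training-loop sketch follows, assuming the feed-dict pattern used by the neon frontend; train_computation is a hypothetical stand-in for a computation the full script would compile.

# Sketch only: `train_computation` is hypothetical, a stand-in for the
# computation compiled from the loss graph by an ngraph transformer.
for step, data in enumerate(train_set):
    data = dict(data)
    data['iteration'] = step  # feed the iteration placeholder created above
    feed_dict = {inputs[k]: data[k] for k in inputs.keys()}
    loss_val = train_computation(feed_dict=feed_dict)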
Example #3
if device_backend == 'hetr':
    if 'HETR_AEON_IP' not in os.environ or 'HETR_AEON_PORT' not in os.environ:
        raise ValueError(
            'To run hetr with more than one device, you need to set an IP address '
            'for the HETR_AEON_IP environment variable and a port number for the '
            'HETR_AEON_PORT environment variable')

    aeon_address = os.environ['HETR_AEON_IP']
    aeon_port = int(os.environ['HETR_AEON_PORT'])
else:
    # Non-hetr run: define the names so the calls below still resolve
    # (None is assumed to be an acceptable default for make_aeon_loaders).
    aeon_address, aeon_port = None, None

# Create training and validation dataset objects
train_set, valid_set = make_aeon_loaders(args.data_dir,
                                         args.batch_size,
                                         args.num_iterations,
                                         dataset=args.dataset,
                                         num_devices=args.num_devices,
                                         device=device_backend,
                                         split_batch=True,
                                         address=aeon_address,
                                         port=aeon_port)
print("Completed loading " + args.dataset + " dataset")

# Make input ops or placeholders, depending on single- or multi-device compute
input_ops_train = train_set.make_input_ops(aeon_address, aeon_port, ax.N,
                                           device_hetr, device_id)
input_ops_valid = valid_set.make_input_ops(aeon_address, aeon_port, ax.N,
                                           device_hetr, device_id)

with ng.metadata(device=device_hetr, device_id=device_id, parallel=ax.N):
    # Build the network
    resnet = BuildResnet(args.dataset,
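The listing is cut off in the middle of the BuildResnet call. One practical note on the hetr branch above: both environment variables must be set before the script starts. A minimal setup sketch, with placeholder address and port values that are not from the original example:

# Placeholder values; substitute the real aeon service address and port.
#   export HETR_AEON_IP=10.0.0.1
#   export HETR_AEON_PORT=4586
import os
os.environ['HETR_AEON_IP'] = '10.0.0.1'
os.environ['HETR_AEON_PORT'] = '4586'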