Example #1
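All three snippets are tails of old-style MXNet training scripts: each picks up inside a network-builder function (an `inception(...)` here, an AlexNet-style stack in Example #3) whose earlier layers are elided, and each assumes the usual preamble of the MXNet ImageNet example scripts. A minimal sketch of that preamble, where the `data` helper module providing `ilsvrc12_iterator` is an assumption about the examples' repository layout:

import logging
import mxnet as mx
# Assumed helper module from the MXNet ImageNet example scripts; it
# wraps the ILSVRC'12 record files in mx.io data iterators.
from data import ilsvrc12_iterator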
    # linear classifier on top of the global average pooling output
    flatten = mx.symbol.Flatten(data=avg, name='flatten')
    fc1 = mx.symbol.FullyConnected(data=flatten,
                                   num_hidden=nhidden,
                                   name='fc1')
    softmax = mx.symbol.Softmax(data=fc1, name='softmax')
    return softmax


softmax = inception(21841, 1.0)

batch_size = 64
num_gpu = 4
gpus = [mx.gpu(i) for i in range(num_gpu)]
input_shape = (3, 224, 224)

train = ilsvrc12_iterator(batch_size=batch_size, input_shape=input_shape)

model_prefix = "model/Inception-Full"
num_round = 10

logging.info(
    "This script is used to train the full ImageNet set over 21841 classes.")
logging.info("For the normal 1000-class problem, please use inception.py")

model = mx.model.FeedForward(ctx=gpus,
                             symbol=softmax,
                             num_round=num_round,
                             learning_rate=0.05,
                             momentum=0.9,
                             wd=0.00001)
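
Example #1 stops after constructing the FeedForward model; the training call itself is missing from the snippet. A sketch of the launch, mirroring the fit call that Example #2 (the same script) performs:

# Kick off training: log throughput and the training metric per batch,
# and checkpoint the model after every round (as in Example #2 below).
model.fit(X=train,
          eval_metric="acc",
          batch_end_callback=[mx.callback.Speedometer(batch_size),
                              mx.callback.log_train_metric(100)],
          epoch_end_callback=mx.callback.do_checkpoint(model_prefix))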
Example #2
    # global avg pooling
    avg = mx.symbol.Pooling(data=in5b, kernel=(7, 7), stride=(1, 1),
                            name="global_pool", pool_type='avg')
    # linear classifier
    flatten = mx.symbol.Flatten(data=avg, name='flatten')
    fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=nhidden, name='fc1')
    softmax = mx.symbol.Softmax(data=fc1, name='softmax')
    return softmax

softmax = inception(21841, 1.0)

batch_size = 64
num_gpu = 4
gpus = [mx.gpu(i) for i in range(num_gpu)]
input_shape = (3, 224, 224)

train = ilsvrc12_iterator(batch_size=batch_size, input_shape=input_shape)

model_prefix = "model/Inception-Full"
num_round = 10

logging.info("This script is used to train ImageNet fullset over 21841 classes.")
logging.info("For noraml 1000 classes problem, please use inception.py")

model = mx.model.FeedForward(ctx=gpus, symbol=softmax, num_round=num_round,
                             learning_rate=0.05, momentum=0.9, wd=0.00001)

model.fit(X=train,
          eval_metric="acc",
          batch_end_callback=[mx.callback.Speedometer(batch_size),
                              mx.callback.log_train_metric(100)],
          epoch_end_callback=mx.callback.do_checkpoint(model_prefix))
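
`do_checkpoint(model_prefix)` writes `model/Inception-Full-symbol.json` plus one numbered `.params` file per round. A minimal sketch of resuming from the final checkpoint, assuming the `FeedForward.load(prefix, epoch)` signature of the same pre-0.9 MXNet API:

# Reload symbol + parameters saved after the last round; the epoch
# argument selects which numbered .params file to read (assumed API).
loaded = mx.model.FeedForward.load(model_prefix, num_round, ctx=gpus)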
Example #3
# stage 4
flatten = mx.symbol.Flatten(data=pool3)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096)
relu6 = mx.symbol.Activation(data=fc1, act_type="relu")
dropout1 = mx.symbol.Dropout(data=relu6, p=0.5)
# stage 5
fc2 = mx.symbol.FullyConnected(data=dropout1, num_hidden=4096)
relu7 = mx.symbol.Activation(data=fc2, act_type="relu")
dropout2 = mx.symbol.Dropout(data=relu7, p=0.5)
# stage 6
fc3 = mx.symbol.FullyConnected(data=dropout2, num_hidden=1000)
softmax = mx.symbol.Softmax(data=fc3)


## data
batch_size = 256
train, val = ilsvrc12_iterator(batch_size=batch_size, input_shape=(3, 224, 224))

## train
num_gpus = 2
gpus = [mx.gpu(i) for i in range(num_gpus)]
model = mx.model.FeedForward(
    ctx           = gpus,
    symbol        = softmax,
    num_round     = 20,
    learning_rate = 0.01,
    momentum      = 0.9,
    wd            = 0.00001)
logging.basicConfig(level = logging.DEBUG)
model.fit(X = train, eval_data = val,
          batch_end_callback = mx.callback.Speedometer(batch_size))
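
Once fit returns, the trained model can score the validation iterator directly; a minimal sketch using `FeedForward.predict`, which in this API era ran a DataIter through the network and returned the outputs (treated here as an assumption about the old interface):

# Run the validation set through the trained net; the result is a
# NumPy array of softmax probabilities, one row per example.
probs = model.predict(val)
print(probs.shape)  # expected: (num_val_images, 1000)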