# Create an Optimizer (assuming `model` holds the CNN built earlier)
optimizer = Optimizer(
    model=model,
    training_rdd=train_data,
    criterion=ClassNLLCriterion(),
    optim_method=SGD(learningrate=0.4, learningrate_decay=0.0002),
    end_trigger=MaxEpoch(5),
    batch_size=2048)

# Set the validation logic
optimizer.set_validation(
    batch_size=2048,
    val_rdd=test_data,
    trigger=EveryEpoch(),
    val_method=[Top1Accuracy()]
)

# Generate summaries and start TensorBoard if needed
(train_summary, val_summary) = generate_summaries('/home/cdsw/tmp/bigdl_summaries', 'cnn')
optimizer.set_train_summary(train_summary)
optimizer.set_val_summary(val_summary)
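# The `generate_summaries` helper used above is not defined in these snippets.
# A minimal sketch, assuming it simply wraps BigDL's TrainSummary and
# ValidationSummary with a shared log directory and a per-run app name:
from bigdl.optim.optimizer import TrainSummary, ValidationSummary

def generate_summaries(log_dir, app_name):
    # One summary object for training metrics and one for validation metrics,
    # both written under the given log directory for TensorBoard to pick up.
    train_summary = TrainSummary(log_dir=log_dir, app_name=app_name)
    val_summary = ValidationSummary(log_dir=log_dir, app_name=app_name)
    return train_summary, val_summary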


# Train the network. This may take a while; once it finishes you have a trained model.


trained_model = optimizer.optimize()

# ## 4. Prediction on test data

# Map a prediction (vector of class scores) to its 0-based label
def map_predict_label(l):
    return np.array(l).argmax()

# Map a ground-truth label (1-based in BigDL) to a 0-based label
def map_groundtruth_label(l):
    return l[0] - 1
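# A usage sketch for the two helpers above (the original prediction cell is not
# included here): `trained_model.predict(rdd)` is the standard BigDL Python API;
# reading the ground truth via `s.label.to_ndarray()` assumes the Samples in
# `test_data` were built with a single label tensor.
predictions = trained_model.predict(test_data)
print('Predicted labels:   ',
      [map_predict_label(p) for p in predictions.take(8)])
print('Ground-truth labels:',
      [map_groundtruth_label(s.label.to_ndarray()) for s in test_data.take(8)])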
# Example #2

# 4. Optimizer setup
# Create an Optimizer

optimizer = Optimizer(
    model=model,
    training_rdd=train_data,
    criterion=MSECriterion(),
    optim_method=Adam(),
    end_trigger=MaxEpoch(2),
    batch_size=batch_size)
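# No validation is attached in this autoencoder example, so the `val_summary`
# set below would have nothing to record. A sketch of how validation could be
# added, mirroring the other examples but scoring reconstruction loss with the
# same MSECriterion (assumes a `test_data` RDD prepared like `train_data`):
from bigdl.optim.optimizer import EveryEpoch, Loss

optimizer.set_validation(
    batch_size=batch_size,
    val_rdd=test_data,
    trigger=EveryEpoch(),
    val_method=[Loss(MSECriterion())]
)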


# Generate summaries and start TensorBoard if needed
(train_summary, val_summary) = generate_summaries('/home/cdsw/tmp/bigdl_summaries', 'autoencoder')
optimizer.set_train_summary(train_summary)
optimizer.set_val_summary(val_summary)


# Train the model
trained_model = optimizer.optimize()


# 5. Loss visualization
# Let's draw the performance curve during optimization.

loss = np.array(train_summary.read_scalar("Loss"))

plt.figure(figsize=(12, 12))
plt.plot(loss[:, 0], loss[:, 1], label='loss')
plt.legend()
# Example #3

# Create an Optimizer (assuming `model` holds the multilayer perceptron built earlier)
optimizer = Optimizer(
    model=model,
    training_rdd=train_data,
    criterion=ClassNLLCriterion(),
    optim_method=SGD(learningrate=learning_rate),
    end_trigger=MaxEpoch(training_epochs),
    batch_size=batch_size)

# Set the validation logic
optimizer.set_validation(
    batch_size=batch_size,
    val_rdd=test_data,
    trigger=EveryEpoch(),
    val_method=[Top1Accuracy()]
)

# Generate summaries and start TensorBoard if needed
(train_summary, val_summary) = generate_summaries('/home/cdsw/tmp/bigdl_summaries', 'multilayer_perceptron')
optimizer.set_train_summary(train_summary)
optimizer.set_val_summary(val_summary)

# Boot the training process (wrapped in %%time in the original notebook)
trained_model = optimizer.optimize()
print("Optimization Done.")


# 5. Loss visualization

# After training, we can draw the performance curves from the previous `train_summary` and `val_summary` variables.

loss = np.array(train_summary.read_scalar("Loss"))
top1 = np.array(val_summary.read_scalar("Top1Accuracy"))
# Plot training loss and validation Top1Accuracy; the body after subplot(2, 1, 1) is an assumed completion
def plotLoss():
    plt.figure(figsize=(12, 12))
    plt.subplot(2, 1, 1)
    plt.plot(loss[:, 0], loss[:, 1], label='loss')
    plt.legend()
    plt.subplot(2, 1, 2)
    plt.plot(top1[:, 0], top1[:, 1], label='top1 accuracy')
    plt.legend()
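# Usage sketch for the helper above; plt.show() is only needed when not plotting inline.
plotLoss()
plt.show()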
# Example #4
# Create an Optimizer

optimizer = Optimizer(
    model=lenet_model,
    training_rdd=train_data,
    criterion=ClassNLLCriterion(),
    optim_method=SGD(learningrate=0.4, learningrate_decay=0.0002),
    end_trigger=MaxEpoch(5),
    batch_size=256)

# Set the validation logic
optimizer.set_validation(
    batch_size=256,
    val_rdd=test_data,
    trigger=EveryEpoch(),
    val_method=[Top1Accuracy(), Loss()]
)

# Use a timestamped app name so each run gets its own summary directory
app_name = 'lenet-' + dt.datetime.now().strftime("%Y%m%d-%H%M%S")

# Create training and validation summaries
(train_summary, val_summary) = generate_summaries('/tmp/bigdl_summaries', app_name)
optimizer.set_train_summary(train_summary)
optimizer.set_val_summary(val_summary)

# Boot training process
trained_model = optimizer.optimize()
print("Optimization Done.")
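# A possible follow-up, assuming the `save(path, over_write)` method that BigDL
# layers expose in the releases these snippets target; the path is illustrative.
trained_model.save('/tmp/bigdl_lenet.model', True)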