示例#1
0
文件: train_comp.py 项目: konng88/ML
    # NOTE(review): the commented-out code below froze the shared base network
    # and ran both inputs through it layer-by-layer; kept for reference —
    # consider deleting once the current two-branch head is confirmed final.
    #     layer.trainable = False
    # x1 = inp1
    # x2 = inp2
    # for i in range(1,len(base.layers)):
    #     x1 = base.layers[i](x1)
    #     x2 = base.layers[i](x2)

    # Two-branch head: each branch gets its own pooling (gp1/gp2) and dense
    # (d1/d2) layer — presumably defined earlier in this function; verify.
    x1 = gp1(x1)
    x2 = gp2(x2)
    x1 = d1(x1)
    x2 = d2(x2)
    # Stack the two branch embeddings as a length-2 sequence, run them through
    # an LSTM, then a final dense layer (d3) to produce the model output.
    x = tf.stack([x1,x2],axis=1)
    x = lstm(x)
    x = d3(x)
    model = Model(inputs=[inp1,inp2], outputs=x)
    def scheduler(epoch):
        """Step-decay learning-rate schedule.

        Starts at 0.005 and multiplies by 0.4 once every 3 epochs.
        """
        initial_lr = 0.005
        decay = 0.4 ** (epoch // 3)
        return initial_lr * decay

    # Decay the learning rate on the fixed schedule above, and checkpoint
    # only epochs that improve the monitored validation metric.
    callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
    saver = tf.keras.callbacks.ModelCheckpoint(filepath='saved/model',save_best_only=True,verbose=1)

    # Compile, render and summarize the model before training.
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    tf.keras.utils.plot_model(model)
    model.summary()

    # training
    # NOTE(review): train_generator/test_generator come from outside this view —
    # confirm they yield ([input1, input2], one-hot labels) to match the
    # two-input model and the categorical_crossentropy loss.
    history = model.fit(x=train_generator,validation_data=test_generator,epochs=5,workers=4,callbacks=[saver,callback])
    plot_training_curve(history.history)
示例#2
0
    # Evaluate on the test set for this epoch and record the metrics.
    te_acc = test(logger, epoch, model, testloader, criterion, device)  
    train_acc.append(tr_acc)
    test_acc.append(te_acc)
    loss_list.append(loss)
    # Save a checkpoint whenever test accuracy beats the best seen so far.
    if te_acc > BEST_ACC:
        BEST_ACC = te_acc
        logger.info('Saving best epoch {}...'.format(len(train_acc)))
        # len(train_acc) doubles as a 1-based epoch counter here.
        state = {'model': model.state_dict(),
                 'epoch': len(train_acc),
                 'best_acc': BEST_ACC}
        torch.save(state, os.path.join(save_path, 'finetune_best_ckpt.t7'))

total_time = time.time() - start_time  # wall-clock seconds since start_time was taken

# Compute model complexity (MACs / parameter count) of the final model.
# NOTE(review): assumes 3-channel square inputs of side RESOLUTION — confirm
# this matches the data the model was actually trained on.
mac, params = get_model_complexity_info(model, (3, RESOLUTION, RESOLUTION), as_strings=True, print_per_layer_stat=False)
MAC_list.append(mac)
params_list.append(params)

# Log a final summary of the whole run.
logger.info('train_acc: {}'.format(train_acc))
logger.info('test_acc: {}'.format(test_acc))
logger.info('best test acc : {}'.format(max(test_acc)))
# Divides by 5 unconditionally, so this is only a true mean when at least
# 5 epochs were recorded.
logger.info('mean acc of last 5 epoch : {}'.format(sum(test_acc[-5:])/5.0))
logger.info('mac:{}'.format(MAC_list))
logger.info('params:{}'.format(params_list))
logger.info('growing time: {}, total time: {}'.format(growing_time, total_time))
logger.info('Final model is {}'.format(model))

# Plot and save the training curve for the recorded accuracies/losses.
plot_training_curve(train_acc, test_acc, loss_list, save_path)