train_data = DataLoader(train_dataset, batch_size=params.batch_size,
                        shuffle=True, num_workers=1)  # num_workers: number of worker processes loading the data
valid_data = DataLoader(val_dataset, batch_size=params.batch_size,
                        shuffle=False, num_workers=1)

# build the model
ae = AutoEncoder(params.n_attr).cuda()
lat_dis = LatentDiscriminator(params.n_attr).cuda()
ptc_dis = PatchDiscriminator().cuda()
clf_dis = Classifier(params.n_attr).cuda()

# trainer / evaluator
trainer = Trainer(ae, lat_dis, ptc_dis, clf_dis, train_data, params)
evaluator = Evaluator(ae, lat_dis, ptc_dis, clf_dis, valid_data, params)

for n_epoch in range(params.n_epochs):
    logger.info('Starting epoch %i...' % n_epoch)
    for n_iter in range(0, params.epoch_size, params.batch_size):
        # latent discriminator training
        trainer.lat_dis_step()
        # patch discriminator training
        trainer.ptc_dis_step()
        # classifier discriminator training
        trainer.clf_dis_step()
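
# For reference, a minimal sketch of what a latent-discriminator step such as
# trainer.lat_dis_step() typically does in this fader-network setup: the
# discriminator learns to recover the attributes from the encoder's latent
# code, which the autoencoder step later trains the encoder to hide. The
# function below and its names (ae.encode, the optimizer, the BCE loss) are
# illustrative assumptions, not the project's actual implementation.
import torch
import torch.nn.functional as F

def lat_dis_step_sketch(ae, lat_dis, optimizer, images, attributes):
    """One discriminator update; attributes is a float tensor of 0/1 labels."""
    lat_dis.train()
    with torch.no_grad():
        z = ae.encode(images)  # detach the encoder: only the discriminator updates
    loss = F.binary_cross_entropy_with_logits(lat_dis(z), attributes)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
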
from interpolate import *
# from src.evaluation import Evaluator

params = parameters.trainParams()
params.attr = attr_flag(params.attr)
check_attr(params)
print("params checked")

logger = initialize_exp(params)
data, attributes = load_images(params)
train_data = DataSampler(data[0], attributes[0], params)
valid_data = DataSampler(data[1], attributes[1], params)

ae = AutoEncoder(params).cuda()
lat_dis = LatentDiscriminator(params).cuda() if params.n_lat_dis else None
print("data and model loaded and created")

trainer = Trainer(ae, lat_dis, None, None, train_data, params)
# evaluator = Evaluator(ae, lat_dis, None, None, None, valid_data, params)
print("Trainer created. Starting Training")

for n_epoch in range(params.n_epochs):
    logger.info('Starting epoch %i...' % n_epoch)
    for n_iter in range(0, params.epoch_size, params.batch_size):
        print("n_iter = " + str(n_iter))
        trainer.lat_dis_step()
        trainer.autoencoder_step()
        trainer.step(n_iter)
    logger.info('End of epoch %i.\n' % n_epoch)
    if n_epoch % 5 == 0:
        interpolate(trainer.ae, n_epoch)

print("Training End. Saving Model!")
trainer.save_model('final')
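
# interpolate() comes from the project's interpolate module and is called every
# 5 epochs above with the signature interpolate(ae, n_epoch). As a rough sketch
# of what such a helper usually does for a fader-style autoencoder: encode a
# fixed batch of images, decode them with the target attribute swept over a
# range of values, and save the resulting grid. ae.encode/ae.decode, the extra
# images parameter, and the attribute layout are assumptions for illustration.
import torch
from torchvision.utils import save_image

def interpolate_sketch(ae, n_epoch, images, n_steps=8):
    ae.eval()
    with torch.no_grad():
        z = ae.encode(images)
        rows = []
        for alpha in torch.linspace(0, 1, n_steps):
            # decode the same latent codes with a constant attribute value
            attr = torch.full((images.size(0), 1), float(alpha), device=images.device)
            rows.append(ae.decode(z, attr))
    save_image(torch.cat(rows, dim=0),
               'interpolation_epoch_%i.png' % n_epoch, nrow=images.size(0))
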
valid_data = DataSampler(data[1], attributes[1], params)

# build the trainable model
ae = AutoEncoder(params).cuda()

# build the teacher model (if required); note that params2 = params only
# aliases params, which is why max_fm is restored from max_fm_orig afterwards
if params.lambda_jacobian > 0:
    params2 = params
    params2.max_fm = 512 + params.n_attr
    ae_teacher = AutoEncoder(params2).cuda()
    params.max_fm = params.max_fm_orig
else:
    ae_teacher = None

# trainer / evaluator
trainer = Trainer(ae, ae_teacher, train_data, params)
evaluator = Evaluator(ae, ae_teacher, valid_data, params)

for n_epoch in range(params.n_epochs):
    logger.info('Starting epoch %i...' % n_epoch)
    for eval_iter in range(25):
        evaluator.autoencoder_step(iterno=eval_iter, epoch=n_epoch)
    evaluator.step(n_epoch)
    for n_iter in range(0, params.epoch_size, params.batch_size):
        # autoencoder training
        trainer.autoencoder_step()
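
# A hedged sketch of how a teacher term weighted by params.lambda_jacobian
# could combine with the reconstruction loss inside trainer.autoencoder_step().
# The finite-difference Jacobian formulation and all names (ae.encode/decode,
# the perturbation eps) are assumptions, not the project's actual code.
import torch
import torch.nn.functional as F

def autoencoder_loss_sketch(ae, ae_teacher, images, attrs, lambda_jacobian, eps=1e-2):
    z = ae.encode(images)
    recon = ae.decode(z, attrs)
    loss = F.mse_loss(recon, images)  # standard reconstruction term

    if ae_teacher is not None and lambda_jacobian > 0:
        # crude finite-difference Jacobian of the decoder output w.r.t. the
        # attributes, matched against the frozen teacher's Jacobian
        attrs_pert = attrs + eps
        d_student = (ae.decode(z, attrs_pert) - recon) / eps
        with torch.no_grad():
            z_t = ae_teacher.encode(images)
            d_teacher = (ae_teacher.decode(z_t, attrs_pert)
                         - ae_teacher.decode(z_t, attrs)) / eps
        loss = loss + lambda_jacobian * F.mse_loss(d_student, d_teacher)
    return loss
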
import json

import numpy as np
import pandas as pd
from dotmap import DotMap

from src.datastore import DataStore
from src.model import ConvNet
from src.training import Trainer

# train on the basic configuration
with open('config/basic_config.json') as config_file:
    config = json.load(config_file)
config = DotMap(config)

datastore = DataStore.from_csv(config.data)
train_ds, eval_ds = datastore.get_tf_dataset(32)

model = ConvNet()
trainer = Trainer(model)
trainer.run_epochs(train_ds, eval_ds, 10)

# predict on the held-out test samples
with open('config/eval_test_samples.json') as config_file:
    config = json.load(config_file)
config = DotMap(config)

datastore = DataStore.from_csv(config.data)
test_ds = datastore.get_tf_dataset(32)  # here a single, unsplit test set

# split the test set into full batches of 128 plus a smaller remainder
offset = len(test_ds) - (len(test_ds) % 128)
test_ds_part_1 = test_ds[:offset].reshape(-1, 128, test_ds.shape[1])
test_ds_part_2 = test_ds[offset:]

submission = None
batch_no = 0
for batch in test_ds_part_1:
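
# Worked example of the batch-split arithmetic used above, on a hypothetical
# test set of 1000 rows with 10 features: 1000 % 128 == 104, so offset == 896,
# part 1 becomes 7 full batches of 128, and part 2 keeps the 104 leftover rows.
import numpy as np

demo = np.zeros((1000, 10))
demo_offset = len(demo) - (len(demo) % 128)  # 896
demo_part_1 = demo[:demo_offset].reshape(-1, 128, demo.shape[1])
demo_part_2 = demo[demo_offset:]
assert demo_part_1.shape == (7, 128, 10)
assert demo_part_2.shape == (104, 10)
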