Example #1
    def __init__(self):
        opt = TrainOptions().parse()                 # parse command-line training options
        os.system('rm -rf ./logs_' + opt.problem)    # remove any previous log directory for this problem
        dataset = load_data(opt.num)
        print('Dataset loaded!')
        model = Model(opt)

        if opt.mode == 'train':                      # train, then evaluate the trained model
            self.train(opt, dataset, model)
            self.test(opt, dataset, model)
        else:                                        # evaluation-only run
            self.test(opt, dataset, model)
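Example #1 clears old logs by shelling out to rm -rf. A portable standard-library equivalent, sketched here as an alternative rather than the example's own code:

import shutil

# Portable stand-in for os.system('rm -rf ./logs_' + opt.problem);
# ignore_errors=True mirrors rm -rf's tolerance of a missing directory.
shutil.rmtree('./logs_' + opt.problem, ignore_errors=True)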
Example #2
def train():
    ##############   initialization   ################
    ### load csv files of annotations, initialize options
    dataroot = './data'
    train_list = np.array(
        pd.read_csv(os.path.join(dataroot, 'labels/training_set_anno.csv')))
    val_list = np.array(
        pd.read_csv(os.path.join(dataroot, 'labels/validation_set_anno.csv')))
    opt_mask = TrainOptions().parse()
    opt = copy.deepcopy(opt_mask)                       # second option set for the non-mask branch
    opt.global_transform = opt.global_transform[1:]     # the non-mask branch skips the first global transform
    opt.global_transform_para = opt.global_transform_para[1:]
    train_dataroot = os.path.join(dataroot, 'dataset/train')
    val_dataroot = os.path.join(dataroot, 'dataset/val')

    # define dataset and dataloader
    train_dataset = LXD_dataset.LXD_dataset(opt, train_dataroot, train_list,
                                            'train')
    train_dataset_mask = LXD_dataset.LXD_dataset(opt_mask, train_dataroot,
                                                 train_list, 'train')
    con_dataset_train = LXD_dataset.con_dataset(train_dataset,
                                                train_dataset_mask)
    val_dataset = LXD_dataset.LXD_dataset(opt, val_dataroot, val_list, 'val')
    val_dataset_mask = LXD_dataset.LXD_dataset(opt_mask, val_dataroot,
                                               val_list, 'val')
    con_dataset_val = LXD_dataset.con_dataset(val_dataset, val_dataset_mask)
    train_loader = torch.utils.data.DataLoader(con_dataset_train,
                                               batch_size=opt.batch_size,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(con_dataset_val,
                                             batch_size=4,
                                             shuffle=False)

    # define model and trainer
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids   # restrict visible GPUs before any CUDA context is created
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    opt.device = device
    model1 = models.vgg16_bn(pretrained=False)         # two VGG16-BN backbones, one per branch
    model2 = models.vgg16_bn(pretrained=False)
    model = mVGG16.CSRA_VGG16_2branch(model1, model2, opt, device)
    if opt.use_gpu:
        model.to(device=device)
    trainer = CSRA_trainer.CSRA_Trainer(opt, model)

    # training and validation
    for epoch in range(opt.epochs):
        acc = trainer.train(train_loader)      # one full pass over the training set
        with torch.no_grad():                  # no gradients needed for validation
            trainer.eval(val_loader)
        if acc > 0.98:                         # early stop once training accuracy saturates
            break
        if epoch == 10:                        # one-time learning-rate decay at epoch 10
            trainer.adjust_learning_rate(0.1)
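Example #2 decays the learning rate once, at epoch 10, through trainer.adjust_learning_rate(0.1). The CSRA_Trainer source is not shown, but a method of that name commonly scales each optimizer parameter group, along the lines of this hypothetical sketch:

# Hypothetical sketch; the real CSRA_Trainer.adjust_learning_rate is not shown above.
def adjust_learning_rate(optimizer, factor):
    """Multiply every parameter group's learning rate by factor (e.g. 0.1)."""
    for param_group in optimizer.param_groups:
        param_group['lr'] *= factor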
Example #3
import time
from train_options import TrainOptions
from data import create_dataset
from models import create_model
from visualizer import Visualizer

if __name__ == '__main__':
    opt = TrainOptions().parse()   # get training options
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    dataset_size = len(dataset)    # get the number of images in the dataset.
    print('The number of training images = %d' % dataset_size)

    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    visualizer = Visualizer(opt)   # create a visualizer that displays/saves images and plots
    total_iters = 0                # the total number of training iterations

    for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
        epoch_start_time = time.time()  # timer for entire epoch
        iter_data_time = time.time()    # timer for data loading per iteration
        epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
        visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once every epoch
        model.update_learning_rate()    # update learning rates at the beginning of every epoch.
        for i, data in enumerate(dataset):  # inner loop within one epoch
            iter_start_time = time.time()  # timer for computation per iteration
            if total_iters % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time

            total_iters += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)         # unpack data from dataset and apply preprocessing
            model.optimize_parameters()   # compute losses, backpropagate, and update network weights
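The loop structure and comments of Example #3 match the pix2pix/CycleGAN training script. Assuming that provenance, model.update_learning_rate() steps a scheduler that holds the learning rate constant for n_epochs and then decays it linearly to zero over n_epochs_decay; a sketch of that lambda rule:

from torch.optim import lr_scheduler

def get_scheduler(optimizer, opt):
    # Constant learning rate for the first opt.n_epochs epochs, then linear
    # decay to zero over the following opt.n_epochs_decay epochs.
    def lambda_rule(epoch):
        return 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
    return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)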
Example #4
import time
import sys
import tensorflow as tf
sys.path.append('/home/zhangjunhao/options')
from train_options import TrainOptions
sys.path.append('/home/zhangjunhao/data')
from data_loader import CreateDataLoader
sys.path.append('/home/zhangjunhao/model')
from model_Loader import CreateModel
sys.path.append('/home/zhangjunhao/util')
from utils import error as Error   # aliased so the instance created below does not shadow the import

opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
model = CreateModel(opt)

sess = tf.Session()                    # TensorFlow 1.x graph/session API
loss_g = tf.placeholder(tf.float32)    # fed with the generator loss at logging time
loss_d = tf.placeholder(tf.float32)    # fed with the discriminator loss at logging time
me1 = tf.summary.scalar('loss_g', loss_g)
me2 = tf.summary.scalar('loss_d', loss_d)
merged = tf.summary.merge([me1, me2])
writer = tf.summary.FileWriter("/home/zhangjunhao/logs", sess.graph)

err = Error(model.save_dir)
for epoch in range(opt.count_epoch + 1, opt.epochs + 1):   # resume counting from opt.count_epoch
    epoch_start_time = time.time()
    err.initialize()                                       # reset the error tracker for this epoch

    for i, data in enumerate(data_loader):
        model.forward(data)
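Example #4 is truncated before any summaries are written. With the TensorFlow 1.x API it sets up, the placeholders would be fed Python-side loss values at logging time, along the lines of this sketch (g_loss, d_loss, and step are assumed names, not from the original):

# Hypothetical continuation of the logging path; g_loss, d_loss, and step
# are assumed names, since the original snippet ends before logging.
summary = sess.run(merged, feed_dict={loss_g: g_loss, loss_d: d_loss})
writer.add_summary(summary, step)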
Example #5
""" Entry point for training
"""

from train_options import TrainOptions
from trainer import Trainer

if __name__ == '__main__':
    options = TrainOptions().parse_args()
    trainer = Trainer(options)
    trainer.train()
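Every example above builds its configuration through a TrainOptions class. For readers without access to those codebases, here is a minimal sketch of what such a class typically looks like, modeled on the argparse-based pattern used in the pix2pix/CycleGAN codebase; the option names below are illustrative assumptions, not the exact flags of any example above:

import argparse

class TrainOptions:
    # Minimal illustrative options class; real projects register many more flags.
    def __init__(self):
        self.parser = argparse.ArgumentParser(description='Training options')
        self.parser.add_argument('--mode', type=str, default='train', help="'train' or 'test'")
        self.parser.add_argument('--batch_size', type=int, default=8)
        self.parser.add_argument('--epochs', type=int, default=100)
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='comma-separated GPU ids')

    def parse(self):
        opt = self.parser.parse_args()
        print('------------ Options -------------')
        for k, v in sorted(vars(opt).items()):
            print('%s: %s' % (k, v))
        print('----------------------------------')
        return opt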