Example #1
def main():
    # parse the arguments
    args = parser.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    utils.saveargs(args)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, criterion = models.setup(checkpoints)

    # Data Loading
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # The trainer handles the training loop
    trainer = Trainer(args, model, criterion)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, criterion)

    # start training !!!
    loss_best = 1e10
    for epoch in range(args.nepochs):

        # train for a single epoch
        loss_train = trainer.train(epoch, loaders)
        loss_test = tester.test(epoch, loaders)

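        # checkpoint whenever the validation loss improves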
        if loss_best > loss_test:
            model_best = True
            loss_best = loss_test
            checkpoints.save(epoch, model, model_best)
Example #2
    def non_cluster_pytorchnet(pop, args):
        for i in range(len(pop)):
            torch.manual_seed(args.manual_seed)

            # Create Model
            models = Model(args, pop[i].genome)
            model, criterion, num_params = models.setup()
            model = calculate_flops.add_flops_counting_methods(model)

            # Data Loading
            dataloader = Dataloader(args)
            loaders = dataloader.create()

            # The trainer handles the training loop
            trainer = Trainer(args, model, criterion)
            # The tester handles the evaluation on the validation set
            tester = Tester(args, model, criterion)

            # start training !!!
            acc_test_list = []
            acc_best = 0
            train_time_start = time.time()
            for epoch in range(args.nepochs):
                # train for a single epoch

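                # profile FLOPs during the first epoch only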
                if epoch == 0:
                    model.start_flops_count()
                loss_train, acc_train = trainer.train(epoch, loaders)
                loss_test, acc_test = tester.test(epoch, loaders)
                acc_test_list.append(acc_test)

                if epoch == 0:
                    n_flops = (model.compute_average_flops_cost() / 1e6 / 2)
                # update the best test accuracy found so far
                if acc_test > acc_best:
                    acc_best = acc_test

                # print("Epoch {}, train loss = {}, test accu = {}, best accu = {}, {} sec"
                #       .format(epoch, np.average(loss_train), acc_test, acc_best, time_elapsed))

                if np.isnan(np.average(loss_train)):
                    break

            # end of training
            time_elapsed = np.round((time.time() - train_time_start), 2)
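            # fitness: test error averaged over the last 3 epochs, plus model FLOPs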
            pop[i].fitness[0] = 100.0 - np.mean(acc_test_list[-3:])
            pop[i].fitness[1] = n_flops
            pop[i].n_params = num_params
            pop[i].n_FLOPs = n_flops
            print(
                "Indv {:d}:, test error={:0.2f}, FLOPs={:0.2f}M, n_params={:0.2f}M, {:0.2f} sec"
                .format(i, pop[i].fitness[0], n_flops, num_params / 1e6,
                        time_elapsed))

        return
Example #3
def getmetadata(self):
    args = self.args
    #args.dataset_train = 'metalist'
    args.dataset_train = 'metadata'
    args.loader_train = 'h5pymeta'
    dataloader = Dataloader(args)
    loader = dataloader.create(flag="Test")
    data_iter = iter(loader)
    i = 0
    input_composition = []
    input_metadata = []
    while i < len(loader):
        i += 1
        composition, metadata = next(data_iter)
        input_composition.append(composition)
        input_metadata.append(metadata)
    return input_metadata
Example #4
def main():
    # parse the arguments
    args = config.parse_args()
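    # use the GPU when requested and available, otherwise fall back to the CPU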
    if args.ngpu > 0 and torch.cuda.is_available():
        device = "cuda:0"
    else:
        device = "cpu"
    args.device = torch.device(device)
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if args.save_results:
        utils.saveargs(args)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, criterion, evaluation = models.setup(checkpoints)

    print('Model:\n\t{model}\nTotal params:\n\t{npar:.2f}M'.format(
        model=args.model_type,
        npar=sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Data Loading
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # The trainer handles the training loop
    trainer = Trainer(args, model, criterion, evaluation)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, criterion, evaluation)

    # start training !!!
    loss_best = 1e10
    for epoch in range(args.nepochs):
        print('\nEpoch %d/%d\n' % (epoch + 1, args.nepochs))

        # train for a single epoch
        loss_train = trainer.train(epoch, loaders)
        loss_test = tester.test(epoch, loaders)

        if loss_best > loss_test:
            model_best = True
            loss_best = loss_test
            if args.save_results:
                checkpoints.save(epoch, model, model_best)
Example #5
def main():
    # Parse the Arguments
    args = config.parse_args()
    random.seed(args.manual_seed)
    tf.set_random_seed(args.manual_seed)
    now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S/')
    args.save = os.path.join(args.result_path, now, 'save')
    args.logs = os.path.join(args.result_path, now, 'logs')
    if args.save_results:
        utils.saveargs(args)

    # Initialize the Checkpoints Class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, criterion, evaluation = models.setup(checkpoints)

    # Print Model Summary
    print('Model summary: {}'.format(model.name))
    print(model.summary())

    # Data Loading
    dataloader_obj = Dataloader(args)
    dataloader = dataloader_obj.create()

    # Initialize Trainer and Tester
    trainer = Trainer(args, model, criterion, evaluation)
    tester = Tester(args, model, criterion, evaluation)

    # Start Training !!!
    loss_best = 1e10
    for epoch in range(args.nepochs):
        print('\nEpoch %d/%d' % (epoch + 1, args.nepochs))

        # Train and Test for a Single Epoch
        loss_train = trainer.train(epoch, dataloader["train"])
        loss_test = tester.test(epoch, dataloader["test"])

        if loss_best > loss_test:
            model_best = True
            loss_best = loss_test
            if args.save_results:
                checkpoints.save(epoch, model, model_best)
Example #6
def main():
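    # NOTE: args is assumed to be parsed beforehand (e.g. args = config.parse_args())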
    demog_type = 'race'
    demog_target = {'gender': 1, 'age': 2, 'race': 3}
    demog_refer = {'gender': [2, 3], 'age': [1, 3], 'race': [1, 2]}

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, model_dict, evaluation = models.setup(checkpoints)

    print('Model:\n\t{model}\nTotal params:\n\t{npar:.2f}M'.format(
        model=args.model_type,
        npar=sum(p.numel() for p in model['feat'].parameters()) / 1000000.0))

    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, model_dict['loss'], evaluation)

    test_freq = 1

    dataloader = Dataloader(args)
    dataset_options_test = args.dataset_options_test

    resfilename = '/research/prip-gongsixu/codes/biasface/results/evaluation/demogbias/race.txt'
    gdemog = get_demog_cohorts(demog_type)
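    # evaluate accuracy separately for each demographic cohort and record it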
    with open(resfilename, 'w') as f:
        for demog_group in gdemog:
            dataset_options_test['target_ind'] = demog_target[demog_type]
            dataset_options_test['refer_ind'] = demog_refer[demog_type]
            dataset_options_test['demog_group'] = demog_group
            args.dataset_options_test = dataset_options_test
            loaders = dataloader.create(flag='Test')
            acc_test = tester.test_demog(demog_type, loaders)
            f.write(demog_group + '\t' + str(acc_test) + '\n')
            print(acc_test)
Example #7
    modelG.load_state_dict(checkpointG)
if args.netE != '':
    checkpointE = checkpoints.load(args.netE)
    Encoder.load_state_dict(checkpointE)
if args.prevD != '':
    prevD = copy.deepcopy(modelD)
    checkpointDprev = checkpoints.load(args.prevD)
    prevD.load_state_dict(checkpointDprev)
if args.prevG != '':
    prevG = copy.deepcopy(modelG)
    checkpointGprev = checkpoints.load(args.prevG)
    prevG.load_state_dict(checkpointGprev)

# Data Loading
dataloader = Dataloader(args)
loader_train = dataloader.create(flag="Train")
loader_test = dataloader.create(flag="Test")

# The trainer handles the training loop and evaluation on validation set
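# the gogan_type flag selects which Trainer variant is imported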
if args.gogan_type == "no_vae":
    from train_no_vae import Trainer
elif args.gogan_type == "identity":
    from train_identity import Trainer
elif args.gogan_type == "no_identity":
    from train_no_identity import Trainer
elif args.gogan_type == "no_identity_enc":
    from train_no_identity_enc import Trainer
else:
    from train import Trainer
trainer = Trainer(args, modelD, modelG, Encoder, criterion, prevD, prevG)
Example #8
def main():
    # load pop for extrapolation
    pop = pickle.load(open("Results/CIFAR10_baseline/Run5/pop_extra.pkl",
                           "rb"))

    for i in range(len(pop)):
        genome = pop[i].genome
        # parse the arguments
        args = parser.parse_args()
        args.save = os.path.join("Extrapolation_results",
                                 "Model_ID_{}".format(pop[i].id))
        random.seed(args.manual_seed)
        torch.manual_seed(args.manual_seed)
        utils.saveargs(args)

        # initialize the checkpoint class
        checkpoints = Checkpoints(args)

        # Create Model
        # genome = [[[0], [0, 0], [0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0, 0], [0]],
        #           [[0], [0, 0], [0, 1, 0], [0, 1, 0, 1], [0, 1, 1, 1, 1], [0]],
        #           [[1], [0, 0], [0, 0, 0], [0, 1, 1, 1], [0, 0, 0, 0, 1], [1]]]
        # genome = [[[0], [0, 1], [1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1, 1], [0]],
        #           [[0], [0, 1], [0, 0, 0], [0, 0, 1, 1], [1, 1, 1, 1, 1], [0]],
        #           [[0], [0, 0], [0, 0, 1], [1, 1, 0, 1], [1, 1, 0, 1, 1], [1]]]
        models = Model(args, genome)
        model, criterion, num_params = models.setup()
        model = calculate_flops.add_flops_counting_methods(model)
        # print(model)

        # Data Loading
        dataloader = Dataloader(args)
        loaders = dataloader.create()

        # The trainer handles the training loop
        trainer = Trainer(args, model, criterion)
        # The tester handles the evaluation on the validation set
        tester = Tester(args, model, criterion)

        # start training !!!
        loss_best = 1e10
        acc_test_list = []
        acc_best = 0
        for epoch in range(args.nepochs):

            # train for a single epoch
            start_time_epoch = time.time()
            if epoch == 0:
                model.start_flops_count()
            loss_train, acc_train = trainer.train(epoch, loaders)
            loss_test, acc_test = tester.test(epoch, loaders)
            acc_test_list.append(acc_test)

            if acc_test > acc_best:
                model_best = True
                # update the best test accuracy found so far
                acc_best = acc_test
                loss_best = loss_test
                checkpoints.save(epoch, model, model_best)

            time_elapsed = np.round((time.time() - start_time_epoch), 2)
            if epoch == 0:
                n_flops = (model.compute_average_flops_cost() / 1e6 / 2)

            if np.isnan(np.average(loss_train)):
                break

            print(
                "Epoch {:d}:, test error={:0.2f}, FLOPs={:0.2f}M, n_params={:0.2f}M, {:0.2f} sec"
                .format(epoch, 100.0 - acc_test, n_flops, num_params / 1e6,
                        time_elapsed))

        # save the final model parameter
        # torch.save(model.state_dict(),
        #            "model_file/model%d.pth" % int(args.genome_id - 1))
        pop[i].fitness[0] = acc_best
        pop[i].fitness[1] = n_flops
        pop[i].n_params = num_params
        # error = 100 - np.mean(acc_test_list[-3:])

        # accuracy = acc_best
        # fitness = [acc_best, n_flops, num_params]
        # with open("output_file/output%d.pkl" % int(args.genome_id - 1), "wb") as f:
        #     pickle.dump(fitness, f)

    with open("Results/CIFAR10_baseline/Run5/pop_extra_evaluated.pkl",
              "wb") as f:
        pickle.dump(pop, f)
Example #9
def main():
    # parse the arguments
    args = parser.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    # utils.saveargs(args)

    # initialize the checkpoint class
    # checkpoints = Checkpoints(args)


    # Create Model
    models = Model(args)
    model, criterion, num_params = models.setup()
    model = calculate_flops.add_flops_counting_methods(model)
    # print(model)

    # Data Loading
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # The trainer handles the training loop
    trainer = Trainer(args, model, criterion)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, criterion)

    # start training !!!
    loss_best = 1e10
    acc_test_list = []
    acc_best = 0
    for epoch in range(args.nepochs):

        # train for a single epoch
        start_time_epoch = time.time()
        if epoch == 0:
            model.start_flops_count()
        loss_train, acc_train = trainer.train(epoch, loaders)
        loss_test, acc_test = tester.test(epoch, loaders)
        acc_test_list.append(acc_test)
        # if loss_best > loss_test:
        #     model_best = True
        #     loss_best = loss_test
        #     checkpoints.save(epoch, model, model_best)

        time_elapsed = np.round((time.time() - start_time_epoch), 2)
        if epoch == 0:
            n_flops = (model.compute_average_flops_cost() / 1e6 / 2)
        # update the best test accuracy found so far
        if acc_test > acc_best:
            acc_best = acc_test

        if np.isnan(np.average(loss_train)):
            break

        print("Epoch {:d}:, test error={:0.2f}, FLOPs={:0.2f}M, n_params={:0.2f}M, {:0.2f} sec"
              .format(epoch, 100.0-acc_test, n_flops, num_params/1e6, time_elapsed))

    # save the final model parameter
    # torch.save(model.state_dict(),
    #            "model_file/model%d.pth" % int(args.genome_id - 1))
    error = 100 - np.mean(acc_test_list[-3:])

    # accuracy = acc_best
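    # fitness: mean test error over the last 3 epochs, FLOPs, and parameter count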
    fitness = [error, n_flops, num_params]
    with open("output_file/output%d.pkl" % int(args.genome_id - 1), "wb") as f:
        pickle.dump(fitness, f)
Example #10
def main():
    # parse the arguments
    args = parser.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    # utils.saveargs(args)

    # initialize the checkpoint class
    # checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, criterion = models.setup()
    # print(model)

    # Data Loading
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # The trainer handles the training loop
    trainer = Trainer(args, model, criterion)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, criterion)

    # start training !!!
    loss_best = 1e10
    acc_test_list = []
    inference_time_list = []
    acc_best = 0
    for epoch in range(args.nepochs):

        # train for a single epoch
        start_time_epoch = time.time()
        loss_train, acc_train = trainer.train(epoch, loaders)
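        # time the evaluation pass separately to estimate inference latency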
        inference_time_start = time.time()
        loss_test, acc_test = tester.test(epoch, loaders)
        inference_time_list.append(
            np.round((time.time() - inference_time_start), 2))
        acc_test_list.append(acc_test)
        # if loss_best > loss_test:
        #     model_best = True
        #     loss_best = loss_test
        #     checkpoints.save(epoch, model, model_best)

        time_elapsed = np.round((time.time() - start_time_epoch), 2)

        # update the best test accuracy found so far
        if acc_test > acc_best:
            acc_best = acc_test

        print(
            "Epoch {}, train loss = {}, test accu = {}, best accu = {}, {} sec"
            .format(epoch, np.average(loss_train), acc_test, acc_best,
                    time_elapsed))

    # save the final model parameter
    # torch.save(model.state_dict(),
    #            "model_file/model%d.pth" % int(args.genome_id - 1))
    accuracy = np.mean(acc_test_list[-5:])
    inference_time = np.median(inference_time_list)

    # accuracy = acc_best
    fitness = [accuracy, inference_time]
    with open("output_file/output%d.pkl" % int(args.genome_id - 1), "wb") as f:
        pickle.dump(fitness, f)
Example #11
class Minirun():
    def __init__(self, nrun=-1):
        self.args = Namespace(
            cuda=True,
            ndf=8,
            nef=8,
            wkld=0.01,
            gbweight=100,
            nlatent=9, 
            nechannels=6,
            ngchannels=1,
            resume="",
            save="mini-save",
            loader_train="h5py",
            loader_test="h5py",
            dataset_test=None,
            dataset_train="filelist",
            split_test=0.0,
            split_train=1.0,
            filename_test="./data/data.txt",
            filename_train="./data/data.txt",
            batch_size=64,
            resolution_high=512,
            resolution_wide=512,
            nthreads=32,
            images="mini-save/images",
            pre_name="save",
        )
        latest_save = sorted(list(Path("results").iterdir()))[nrun]
        self.rundate = latest_save.name
        latest_save = latest_save.joinpath("Save")
        latest_save = {"netG": latest_save}
        self.args.resume = latest_save
        checkpoints = Checkpoints(self.args)

        # Create model
        models = Model(self.args)
        self.model, self.criterion = models.setup(checkpoints)

        # Data loading
        self.dataloader = Dataloader(self.args)
        self.loader = self.dataloader.create(flag="Test")
        print("\t\tBatches:\t", len(self.loader))

        self.resolution_high = self.args.resolution_high
        self.resolution_wide = self.args.resolution_wide
        self.batch_size = self.args.batch_size
        self.ngchannels = self.args.ngchannels
        self.nechannels = self.args.nechannels
        self.nlatent = self.args.nlatent
        self.composition = torch.FloatTensor(self.batch_size, self.ngchannels, self.resolution_high, self.resolution_wide)
        self.metadata = torch.FloatTensor(self.batch_size, self.nechannels, self.resolution_high, self.resolution_wide)

        if self.args.cuda:
            self.composition = self.composition.cuda()
            self.metadata = self.metadata.cuda()

        self.composition = Variable(self.composition)
        self.metadata = Variable(self.metadata)

        self.imgio = plugins.ImageIO(self.args.images, self.args.pre_name)

    def date(self):
        return self.rundate

    def getmetadata(self):
        args = self.args
        #args.dataset_train = 'metalist'
        args.dataset_train = 'metadata'
        args.loader_train = 'h5pymeta'
        dataloader = Dataloader(args)
        loader = dataloader.create(flag="Test")
        data_iter = iter(loader)
        i = 0
        input_composition = []
        input_metadata = []
        while i < len(loader):
            i += 1
            composition, metadata = next(data_iter)
            input_composition.append(composition)
            input_metadata.append(metadata)
        return input_metadata

    def mini(self, return_data=True):
        data_iter = iter(self.loader)
        self.model["netG"].eval()
        i = 0
        generated_data = []
        input_data = []
        mydata = []
        while i < len(self.loader):
            i += 1
            composition, metadata = next(data_iter)
            for aa in metadata:
                mydata.append(aa)
            with open('minirun_metadata.txt', "w") as f:
                f.write(str(mydata[42]))
                #h1 = [1, 2, 3, 4]
                #for index in h1:
                #    f.write(str(mydata[index-1]))
            composition = composition.float()
            batch_size = composition.size(0)
            self.composition.data.resize_(composition.size()).copy_(composition)
            self.metadata.data.resize_(metadata.size()).copy_(metadata)

            self.model["netG"].zero_grad()

            # run the actual models
            output = self.model["netG"].forward(self.metadata)
            batch_gen = output.data.cpu()
            generated_data.append(batch_gen)
            input_data.append(composition)
        gen_data = torch.cat(generated_data, 0)
        input_data = torch.cat(input_data, 0)
        #print (gen_data.size())
        #print (input_data.size())
        
        #ypred = np.array(gen_data[0])
        #ytest = np.array(input_data[0])
        #ypred = pd.DataFrame([ypred])
        #ytest = pd.DataFrame([ytest])
        print ('----------------------------------------------------')
        #lpred = []
        #for i in ypred:
        #    for j in i:
        #        for k in j:
        #            lpred.append(k)
        # 
        #lpred = np.asarray(lpred)
        #ypred = pd.DataFrame([lpred])
        #print (ypred)
        #print ('----------------------------------------------------')
        #ltest = []
        #for i in ytest:
        #    for j in i:
        #        for k in j:
        #            ltest.append(k)
        #ltest = np.asarray(ltest)
        #ytest = ltest
        #print (ytest)
        #print (ytest[0])
        
        # Area fraction calculation for the machine-generated image
        values = gen_data[42]
        positive_composition = 0
        negative_composition = 0
        ymodel = []
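        # threshold pixel values at zero to binarize the generated composition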
        for values1 in np.nditer(values):
            if values1 >= 0:
                temp = 1
            else:
                temp = 0
            ymodel.append(temp)
        
        #matrix_percentage = (positive_composition * 100) / 262144
        #ppt_percentage = (negative_composition * 100) / 262144
        #print ('Area fraction analysis for machine generated data')
        #print (matrix_percentage)
        #print (ppt_percentage)

        # Area fraction calculation for the input image from the phase-field code
        values = input_data[42]
        positive_composition = 0
        negative_composition = 0
        ytest = []

        for values1 in np.nditer(values):
            if values1 >= 0:
                temp = 1
            else:
                temp = 0
            ytest.append(temp)

        print ('****************************************************')
        print (ytest)
        print (type(ytest))
        print ('****************************************************')
        print (ymodel)
        print (type(ymodel))
        print ('****************************************************')
        
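        # compare the binarized prediction against the ground truth via an ROC curve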
        fpr, tpr, thresholds = metrics.roc_curve(ytest, ymodel)
        plt.figure(figsize = (12, 10))
        plt.plot(fpr, tpr)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.0])
        plt.xlabel('False positive rate')
        plt.ylabel('True positive rate')
        plt.savefig('roc_curve.png')
        plt.show()

        #matrix_percentage = (positive_composition * 100) / 262144
        #ppt_percentage = (negative_composition * 100) / 262144
        #print ('Area fraction analysis for input data from phase field simulation')
        #print (matrix_percentage)
        #print (ppt_percentage)

        
        #with open('gen_data.txt', 'w') as f:
        #    f.write(str(gen_data[:1]))

        #with open('input_data.txt', 'w') as f:
        #    f.write(str(input_data[:1]))

        if return_data:
            return gen_data, input_data
        else:
            self.imgio.update({"input": input_data, "sample": gen_data})
            #self.imgio.save(0, style='magnitude')
            self.imgio.save(0)

        accuracy = round(accuracy_score(ytest, ymodel) * 100, 2)
        print (accuracy)
Example #12
def main():
    # parse the arguments
    args = config.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    args.save = os.path.join(args.result_path, 'save')
    args.logs = os.path.join(args.result_path, 'logs')
    utils.saveargs(args)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    rankgan_model, criterion = models.setup(checkpoints)
    modelD = rankgan_model[0]
    modelG = rankgan_model[1]
    Encoder = rankgan_model[2]
    prevD, prevG = None, None

    if args.netD != '':
        checkpointD = checkpoints.load(args.netD)
        modelD.load_state_dict(checkpointD)
    if args.netG != '':
        checkpointG = checkpoints.load(args.netG)
        modelG.load_state_dict(checkpointG)
    if args.netE != '':
        checkpointE = checkpoints.load(args.netE)
        Encoder.load_state_dict(checkpointE)
    if args.prevD != '':
        prevD = copy.deepcopy(modelD)
        checkpointDprev = checkpoints.load(args.prevD)
        prevD.load_state_dict(checkpointDprev)
    if args.prevG != '':
        prevG = copy.deepcopy(modelG)
        checkpointGprev = checkpoints.load(args.prevG)
        prevG.load_state_dict(checkpointGprev)

    # Data Loading
    dataloader = Dataloader(args)
    loader_train = dataloader.create(flag="Train")
    loader_test = dataloader.create(flag="Test")

    # The trainer handles the training loop and evaluation on validation set
    trainer = Trainer(args, modelD, modelG, Encoder, criterion, prevD, prevG)

    # start training !!!
    num_stages = args.num_stages
    stage_epochs = args.stage_epochs
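    # GoGAN trains in stages; each stage runs for its own number of epochs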
    for stage in range(args.start_stage, num_stages):

        # check whether ready to start new stage and if not, optimize discriminator
        # if stage > 2:
        #     print("Optimizing Discriminator")
        #     trainer.setup_stage(stage, loader_test)
        #     opt_disc_flag = True
        #     epoch = 0
        #     while opt_disc_flag:
        #         opt_disc_flag = trainer.optimize_discriminator(stage-1, epoch, loader_train)
        #         epoch += 1

        # setup trainer for the stage
        trainer.setup_stage(stage, loader_test)
        print("Training for Stage {}".format(stage))

        for epoch in range(stage_epochs[stage]):
            # train for a single epoch
            # cur_time = time.time()
            # if stage == 2:

            loss_train = trainer.train(stage, epoch, loader_train)
            # if stage > 0:
            #     disc_acc = trainer.test(stage, epoch, loader_test)
            # print("Time taken = {}".format(time.time() - cur_time))

            try:
                torch.save(modelD.state_dict(),
                           '%s/stage_%d_netD.pth' % (args.save, stage))
                torch.save(modelG.state_dict(),
                           '%s/stage_%d_netG.pth' % (args.save, stage))
                torch.save(Encoder.state_dict(),
                           '%s/stage_%d_netE.pth' % (args.save, stage))
            except Exception as e:
                print(e)
Example #13
def main():
    # parse the arguments
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if args.save_results:
        utils.saveargs(args, config_file)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, model_dict, evaluation = models.setup(checkpoints)

    print('Model:\n\t{model}\nTotal params:\n\t{npar:.2f}M'.format(
        model=args.model_type,
        npar=sum(p.numel() for p in model['feat'].parameters()) / 1000000.0))

    # The trainer handles the training loop
    trainer = Trainer(args, model, model_dict['loss'], evaluation)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, model_dict['loss'], evaluation)

    test_freq = 1

    dataloader = Dataloader(args)

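    # three modes: feature extraction, evaluation only, or full training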
    if args.extract_feat:
        loaders = dataloader.create(flag='Test')
        tester.extract_features(loaders)
        # tester.extract_features_h5py(loaders, len(dataloader.dataset_test))
    elif args.just_test:
        loaders = dataloader.create(flag='Test')
        acc_test = tester.test(args.epoch_number, loaders)
        print(acc_test)
    else:

        loaders = dataloader.create()
        if args.dataset_train == 'ClassSamplesDataLoader':
            loaders['train'] = dataloader.dataset_train

        # start training !!!
        acc_best = 0
        loss_best = 999
        stored_models = {}

        for epoch in range(args.nepochs - args.epoch_number):
            epoch += args.epoch_number
            print('\nEpoch %d/%d\n' % (epoch + 1, args.nepochs))

            # train for a single epoch
            # loss_train = 3.0
            loss_train = trainer.train(epoch, loaders, checkpoints, acc_best)
            if float(epoch) % test_freq == 0:
                acc_test = tester.test(epoch, loaders)

            if loss_best > loss_train:
                model_best = True
                loss_best = loss_train
                acc_best = acc_test
                if args.save_results:
                    stored_models['model'] = model
                    stored_models['loss'] = trainer.criterion
                    checkpoints.save(acc_best, stored_models, epoch, 'final',
                                     model_best)
Example #14
                    type=float,
                    default=0.999,
                    metavar='',
                    help='Beta 2 parameter for Adam')

args = parser.parse_args()
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
utils.save_args(args)

print('\n\n****** Creating {} model ******'.format(args.net_type))
setup = Model(args)
print("model created successfully!")
print('\n\n****** Preparing {} dataset *******'.format(args.dataset_train))
dataloader = Dataloader(args, setup.input_size)
loader_train, loader_test = dataloader.create()
print('data prepared successfully!')

# initialize model:
if args.resume is None:
    model = setup.model
    model.apply(utils.weights_init)
    train = setup.train
    test = setup.test
    init_epoch = 0
    acc_best = 0
    best_epoch = 0
    if not os.path.isdir(args.save):
        os.makedirs(args.save)

else:  # Transfer Learning
Example #15
def main():
    # parse the arguments
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if args.save_results:
        utils.saveargs(args, config_file)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model_dict, evaluation = models.setup(checkpoints)

    print('Model:\n\t{model}\nTotal params:\n\t{npar:.4f}M'.format(
          model=args.model_type,
          npar=sum(p.numel() for p in model_dict['model'].parameters()) / 1000000.0))

    #### get kernel information ####
    ndemog = args.ndemog
    ndemog = list(range(ndemog))
    demog_combs = list(combinations(ndemog, 2))
    #### get kernel information ####

    #### create writer for TensorBoard ####
    if args.save_results:
        writer = SummaryWriter(args.tblog_dir)
    else:
        writer = None
    #### create writer for TensorBoard ####

    # The trainer handles the training loop
    trainer = Trainer(args, model_dict['model'], model_dict['loss'], model_dict['optimizer'], writer)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model_dict['model'], evaluation, writer)

    test_freq = 1

    dataloader = Dataloader(args)

    if args.extract_feat:
        loaders = dataloader.create(flag='Test')
        tester.extract_features(loaders, 1)
    elif args.just_test:
        loaders = dataloader.create(flag='Test')
        acc_test, acc_mean = tester.test(loaders, 1)
        print(acc_test, acc_mean)
    else:
        loaders = dataloader.create()
        if args.dataset_train == 'ClassSamplesDataLoader':
            loaders['train'] = dataloader.dataset_train

        # start training !!!
        acc_best = 0
        loss_best = 999
        stored_models = {}

        for epoch in range(args.nepochs - args.epoch_number):
            epoch += args.epoch_number
            print('\nEpoch %d/%d\n' % (epoch + 1, args.nepochs))

            # train for a single epoch
            loss_train = trainer.train(loaders, epoch)

            acc_test = 0
            if epoch % test_freq == 0:
                acc_test, acc_mean = tester.test(loaders, epoch)

            if loss_best > loss_train:
                loss_best = loss_train
                acc_best = acc_test
            if epoch % test_freq == 0 and args.save_results:
                stored_models['model'] = trainer.model
                stored_models['loss'] = trainer.criterion
                stored_models['optimizer'] = trainer.optimizer
                checkpoints.save(acc_test, stored_models, epoch)

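            # at the fusion epoch, update the demographic-specific kernels across group pairs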
            if epoch == args.fuse_epoch:
                update_kernels(args, trainer.model, demog_combs, ndemog)

    if args.save_results:
        writer.close()
Example #16
# Create Model
models = Model(args)
gogan_model, criterion = models.setup(checkpoints)
netD = gogan_model[0]
netG = gogan_model[1]
netE = gogan_model[2]

if args.netD != '':
    checkpointD = checkpoints.load(args.netD)
    netD.load_state_dict(checkpointD)
if args.netG != '':
    checkpointG = checkpoints.load(args.netG)
    netG.load_state_dict(checkpointG)
if args.netE != '':
    checkpointE = checkpoints.load(args.netE)
    netE.load_state_dict(checkpointE)

# Data Loading
dataloader = Dataloader(args)
test_loader = dataloader.create("Test", shuffle=False)
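# shuffle is disabled so samples are generated in a fixed test-set order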

# The generator handles sample generation and interpolation on the test set
# evaluate = Evaluate(args, netD, netG, netE)
generator = Generator(args, netD, netG, netE)

# test for a single epoch
# test_loss = evaluate.complete(test_loader)
# loss = generator.generate_one(test_loader)
loss = generator.interpolate(test_loader)
Example #17
def main():
    # parse the arguments
    args = config.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    args.save = os.path.join(args.result_path, 'save')
    args.logs = os.path.join(args.result_path, 'logs')
    utils.saveargs(args)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    rankgan_model, criterion = models.setup(checkpoints)
    modelD = rankgan_model[0]
    modelG = rankgan_model[1]
    Encoder = rankgan_model[2]
    prevD, prevG = None, None

    if args.netD != '':
        checkpointD = checkpoints.load(args.netD)
        modelD.load_state_dict(checkpointD)
    if args.netG != '':
        checkpointG = checkpoints.load(args.netG)
        modelG.load_state_dict(checkpointG)
    if args.netE != '':
        checkpointE = checkpoints.load(args.netE)
        Encoder.load_state_dict(checkpointE)
    if args.prevD != '':
        prevD = copy.deepcopy(modelD)
        checkpointDprev = checkpoints.load(args.prevD)
        prevD.load_state_dict(checkpointDprev)
    if args.prevG != '':
        prevG = copy.deepcopy(modelG)
        checkpointGprev = checkpoints.load(args.prevG)
        prevG.load_state_dict(checkpointGprev)

    # Data Loading
    dataloader = Dataloader(args)
    loader_train = dataloader.create(flag="Train")
    loader_test = dataloader.create(flag="Test")

    # The trainer handles the training loop and evaluation on validation set
    trainer = Trainer(args, modelD, modelG, Encoder, criterion, prevD, prevG)

    for epoch in range(args.nepochs):
        # train for a single epoch
        # cur_time = time.time()
        # if stage == 2:

        loss_train = trainer.train(epoch, loader_train)
        # if stage > 0:
        #     disc_acc = trainer.test(stage, epoch, loader_test)
        # print("Time taken = {}".format(time.time() - cur_time))

        try:
            torch.save(modelD.state_dict(), '%s/netD.pth' % args.save)
            for i in range(args.nranks - 1):
                torch.save(modelG.state_dict(),
                           '%s/order_%d_netG.pth' % (args.save, i + 1))
        except Exception as e:
            print(e)