Example #1
    def fit(self, train_data, validation_data, epochs=30, batch_size=4):
        depth = 64
        is_cuda = torch.cuda.is_available()
        _device = torch.device('cuda') if is_cuda else torch.device('cpu')

        train_dataset = CustomDataset(train_data, imgsz=self.imgsz, depth=depth)
        valid_dataset = CustomDataset(validation_data, imgsz=self.imgsz, depth=depth)

        total_train_iter = math.ceil(len(train_dataset) / batch_size)
        total_valid_iter = math.ceil(len(valid_dataset) / batch_size)
        # print(train_dataset)

        trainloader = DataLoader(train_dataset,
                                 batch_size=batch_size, 
                                 num_workers=4, 
                                 shuffle=False,
                                 pin_memory=True)

        validloader = DataLoader(valid_dataset, 
                                 # batch_size=int(batch_size / 2), 
                                 batch_size=batch_size,
                                 num_workers=4, 
                                 shuffle=False,
                                 pin_memory=True)

        optimizer = optim.Adam(self.parameters(), lr=1e-4)
        self = self.to(_device)
        
        for epoch in range(0, epochs):
            start = time.time()
            train_loss, train_acc = 0, 0
            self.train()
            # Training Part
            print(f"[Epoch {epoch + 1}/{epochs}] Start")
            for i, (img, label) in enumerate(trainloader):
                optimizer.zero_grad()
                img = img.to(_device)  # Variable is deprecated; tensors work directly
                label = label.to(_device, dtype=torch.int64)

                out = self(img)
                acc = (torch.max(out, 1)[1].cpu().numpy() == torch.max(label, 1)[1].cpu().numpy())
                acc = float(np.count_nonzero(acc) / len(img))  # len(img), not batch_size: the last batch may be smaller
                loss = self.criterion(out, torch.max(label, 1)[1])
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                train_acc += acc
                print("[train %s/%3s] Epoch: %3s | Time: %6.2fs | loss: %6.4f | Acc: %g" % (
                        i + 1, total_train_iter, epoch + 1, time.time() - start, round(loss.item(), 4), float(acc)))

            train_loss = train_loss / total_train_iter
            train_acc = train_acc / total_train_iter
            print("[Epoch {} training Ended] > Time: {:.2}s/epoch | Loss: {:.4f} | Acc: {:g}\n".format(
                epoch + 1, time.time() - start, np.mean(train_loss), train_acc))

            val_loss, val_acc = self.evaluate(model=self, dataloader=validloader, valid_iter=total_valid_iter, batch_size=batch_size)
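
CustomDataset itself is not shown in this example. As a point of reference, here is a minimal sketch of what the training loop above assumes; the (images, one-hot labels) layout and the meaning of depth are guesses, not the author's code:

import torch
from torch.utils.data import Dataset

class CustomDataset(Dataset):
    """Hypothetical sketch: yields (image, one-hot label) pairs, since the
    loop above recovers class indices with torch.max(label, 1)[1]."""
    def __init__(self, data, imgsz=224, depth=64):
        self.images, self.labels = data   # assumed: a (images, one-hot labels) pair
        self.imgsz = imgsz                # assumed: target input resolution
        self.depth = depth                # passed by the caller; exact meaning unknown

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img = torch.as_tensor(self.images[idx], dtype=torch.float32)
        label = torch.as_tensor(self.labels[idx], dtype=torch.int64)
        return img, label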
Example #2
    def __init__(self, model, dataset_index=0, video_target=None):

        if args.video is None:
            
            self.video_target = video_target
            customset_train = CustomDataset(path=args.dataset_path, subset_type="training", dataset_index=dataset_index, video_target=video_target)
            customset_test = CustomDataset(path=args.dataset_path, subset_type="testing", dataset_index=dataset_index, video_target=video_target)

            self.trainloader = torch.utils.data.DataLoader(dataset=customset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
            self.testloader = torch.utils.data.DataLoader(dataset=customset_test, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
        else:
            video_dataset = VideoDataset(video=args.video, batch_size=args.batch_size,
                                         frame_skip=int(args.frame_skip), image_folder=args.extract_frames_path, use_existing=args.use_existing_frames)

            self.videoloader = torch.utils.data.DataLoader(dataset=video_dataset, batch_size=1, shuffle=False, num_workers=args.num_workers)

   
        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG()
        elif (model == "resnet"):
            self.model = ResNet()

        if args.pretrained_model is not None:
            if not args.pretrained_finetuning:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                print("DEBUG : Make it load only part of the resnet model")
                # Restore the 1000-class head so the checkpoint's fc weights
                # match, then swap in a fresh 3-class head for fine-tuning.
                self.model.fc = nn.Linear(512, 1000)
                self.model.load_state_dict(torch.load(args.pretrained_model))
                self.model.fc = nn.Linear(512, 3)
                
        self.model.cuda()
        print("Using weight decay: ", args.weight_decay)
        self.optimizer = optim.SGD(self.model.parameters(), weight_decay=float(args.weight_decay), lr=0.01, momentum=0.9, nesterov=True)
        self.criterion = nn.CrossEntropyLoss().cuda()
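
The DEBUG branch above wants to load only part of a pretrained network. One common approach (a sketch under the assumption that the checkpoint is a plain state_dict; this is not the author's final solution) is to keep only the entries whose names and shapes match the current model:

        # Sketch: merge a 1000-class checkpoint into a model with a different head.
        state = torch.load(args.pretrained_model)
        model_state = self.model.state_dict()
        filtered = {k: v for k, v in state.items()
                    if k in model_state and v.shape == model_state[k].shape}
        model_state.update(filtered)
        self.model.load_state_dict(model_state)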
Example #3
def main():
    custom = CustomDataset('D:/dataset/tiny/')
    name_c = custom.label_name
    num_class = custom.num_label

    num_img = 50000

    generator = mm.Generator(n_noise, num_class).cuda()

    eval_data = iter(DataLoader(custom, batch_size=num_img))
    real_images = next(eval_data)[0].cpu().numpy()

    start_time = time.time()
    for iter_count in range(saving_iter, Max_iter+1, saving_iter):

        Checkpoint = model_path + '/cVG iter ' + str(iter_count) + '/Train_' + str(iter_count) + '.pth'

        print(iter_count)
        print('Weight Restoring.....')
        generator.load_state_dict(torch.load(Checkpoint)['gen'])
        print('Weight Restoring Finish!')

        print('Evaluation start')

        fake_images = gen_images(generator, n_noise, num_class, name_c,
                                 save_path=save_path+'/%d/' % iter_count,
                                 num_img=num_img, save_img=False)

        # Calculate FID scores
        print('Calculate FID scores')
        fid_score = fid.calculate_fid(fake_images, real_images, batch_size=10)

        # Calculate Inception scores
        print('Calculate Inception scores')
        is_score = inception_score.inception_score(fake_images, batch_size=10, splits=10)

        del fake_images

        if not os.path.isdir(save_path):
            os.makedirs(save_path)
        with open(save_path + '/log_FID.txt', 'a+') as f:
            data = 'itr : %05d\t%.3f\n' % (iter_count, fid_score)
            f.write(data)
        with open(save_path + '/log_IS.txt', 'a+') as f:
            data = 'itr : %05d\t%.3f\t%.3f\n' % (iter_count, is_score[0], is_score[1])
            f.write(data)

        print('Evaluation Finish')

        consume_time = time.time() - start_time
        print(consume_time)
        start_time = time.time()
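
gen_images is not defined in this snippet. A plausible sketch, assuming it draws class-conditional samples in batches and returns them as one numpy array (the save_path/save_img arguments suggest it can also write the images to disk, omitted here):

def gen_images(generator, n_noise, num_class, name_c,
               save_path=None, num_img=50000, save_img=False, batch=100):
    """Hypothetical helper: sample num_img images from the generator."""
    generator.eval()
    chunks = []
    with torch.no_grad():
        for _ in range(num_img // batch):
            z = torch.randn(batch, n_noise).cuda()
            c = torch.randint(0, num_class, (batch,)).cuda()
            chunks.append(generator(z, c).cpu().numpy())
    return np.concatenate(chunks, axis=0)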
Example #4
def conformal_prediction(vp):

    vp.model.eval()
    dataset = CustomDataset(path=args.dataset_path, subset_type="training", dataset_index=args.dataset_index)
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=128, shuffle=False, num_workers=args.num_workers)
    dataset2 = CustomDataset(path=args.dataset_path, subset_type="testing", dataset_index=args.dataset_index)
    dataloader2 = torch.utils.data.DataLoader(dataset=dataset2, batch_size=128, shuffle=False, num_workers=args.num_workers)

    MCL = [[],[],[]]
    # only include samples that have : correct classification with low p-value
    # or incorrect classification
    # ask xu
    for data, target, frame_num, game_id in dataloader:
        tn = target.numpy()
        data = data.cuda()  # Variable is deprecated; tensors work directly
        output = vp.model(data)
        print(output)
        for i in range(len(tn)):  # len(tn), not 128: the last batch may be smaller
            MCL[tn[i]].append(output[i, tn[i]].item())  # store a scalar score (the logit for the true class) so the lists can be sorted below
    for data, target, frame_num, game_id in dataloader2:
        tn = target.numpy()
        data = data.cuda()
        output = vp.model(data)
        print(output)
        for i in range(len(tn)):
            MCL[tn[i]].append(output[i, tn[i]].item())

    pickle.dump(MCL, open("./MCL.pkl", "wb"))
    print(MCL)
    MCL = [sorted(MCL[0]), sorted(MCL[1]), sorted(MCL[2])]  # the original stored list.sort (a bound method) without calling it
    
    # train-test 2014
    for epsilon in range(0,11):
        e = epsilon/10.0
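
The epsilon loop is cut off here. For reference, a conformal p-value against one of the sorted calibration lists built above can be sketched like this (names are illustrative; with the true-class scores stored in MCL, a lower score means a less conforming example):

import bisect

def p_value(scores_sorted, s):
    """Smoothed conformal p-value: fraction of calibration scores <= s."""
    idx = bisect.bisect_right(scores_sorted, s)
    return (idx + 1) / (len(scores_sorted) + 1)

# A label would be kept in the prediction set whenever p_value(...) > e.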
Example #5
def extract_ground_truth(pickle_folder, dataset_index, dataset_folder, video_target=None):
    
    dataset = CustomDataset(path=args.dataset_path, subset_type="training", dataset_index=dataset_index, video_target=video_target)
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=args.num_workers)

    prediction_list = {}
    
    for data, target, frame_num, game_id in dataloader:
        
        fn = int(frame_num.numpy()[0])
        tn = int(target.numpy()[0])
        
        prediction_list[fn] = tn

    with open(os.path.join(pickle_folder, "gt_viewpoint.pkl"), "wb") as f:
        pickle.dump(prediction_list, f)
Example #6
def viterbi_training():

    dataset = CustomDataset(path=args.dataset_path, subset_type="training", dataset_index=2)
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=128, shuffle=False, num_workers=args.num_workers)

    transition_matrix = np.zeros((3, 3))
    tn_p = None
    batch_count = 0
    for data, target, frame_num in dataloader:

        tn = target.numpy()
        if tn_p is not None:  # "!= None" compares a numpy array elementwise and breaks the if
            for j in range(min(len(tn_p), len(tn))):  # j, not i: the original shadowed the batch counter; min() guards a short last batch
                transition_matrix[tn_p[j], tn[j]] += 1
        tn_p = tn
        batch_count += 1

        if batch_count % 100 == 0:
            print(np.multiply(transition_matrix, 1.0 / batch_count))
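
Once counting finishes, the counts are typically row-normalized into transition probabilities before Viterbi decoding. A sketch (log-space is the usual choice for numerical stability):

    row_sums = transition_matrix.sum(axis=1, keepdims=True)
    transition_probs = transition_matrix / np.maximum(row_sums, 1)
    log_transition = np.log(transition_probs + 1e-12)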
Example #7
def test(
    path_to_test_data: str = None,
    path_to_models: str = None,
    path_to_save_results: str = None,
):
    if path_to_test_data is None:
        path_to_test_data = "./data/external/val"
        path_to_save_results = "solution.csv"
        path_to_models = "./models/"

    # params of dataset and normalization
    num_classes = 3
    features_stats = {"mean": 86, "std": 22}

    # get test files and make predictions
    test_files = get_data_files_from_folder(path_to_test_data,
                                            data_type=".wav")
    test_files_names = [os.path.splitext(f)[0] for f in test_files]  # splitext is safer than split(".") for dotted names
    test_dataset = CustomDataset(path_to_data=path_to_test_data,
                                 files_to_use=test_files,
                                 load_data_to_mem=True,
                                 path_to_targets=None,
                                 features_stats=features_stats,
                                 mode="test")

    # make predictions using several models
    models_paths = [
        os.path.join(path_to_models, f) for f in os.listdir(path_to_models)
        if f.endswith(".pt")
    ]
    y_hat = np.zeros((len(test_files_names), num_classes))
    for model_path in tqdm(models_paths):
        model = torch.load(model_path)
        y_hat_test = make_prediction(model, test_dataset)
        y_hat += y_hat_test / len(models_paths)
    y_hat = np.argmax(y_hat, axis=1)

    # save test predictions
    results = pd.DataFrame({})
    results["wav_id"] = test_files_names
    results["label"] = y_hat
    results.to_csv(path_to_save_results, index=False)
    print("Done!")
Example #8
if __name__ == '__main__':
    with open('cfg.yaml', 'r') as f:
        config = yaml.safe_load(f)  # yaml.load without an explicit Loader is deprecated and unsafe
        cfg = EasyDict(config)
    args = parse()
    cfg.update(vars(args))

    if cfg.exam:
        assert cfg.data == 'NYU303', 'provide one example of nyu303 to test'
    #  dataset
    if cfg.data == 'Structured3D':
        dataset = Structured3D(cfg.Dataset.Structured3D, 'test')
    elif cfg.data == 'NYU303':
        dataset = NYU303(cfg.Dataset.NYU303, 'test', exam=cfg.exam)
    elif cfg.data == 'CUSTOM':
        dataset = CustomDataset(cfg.Dataset.CUSTOM, 'test')
    else:
        raise NotImplementedError

    dataloader = torch.utils.data.DataLoader(dataset,
                                             num_workers=cfg.num_workers)

    # create network
    model = Detector()
    # compute loss
    criterion = Loss(cfg.Weights)

    # set data parallel
    # if cfg.num_gpus > 1 and torch.cuda.is_available():
    #     model = torch.nn.DataParallel(model)
Example #9
def compute_mstd(args):
    customset = CustomDataset(path=args.dataset_path, subset_type="mstd", dataset_index=args.dataset_index)
    mstd_loader = torch.utils.data.DataLoader(dataset=customset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    compute_mean_std(mstd_loader)
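
compute_mean_std is referenced but not defined here. A sketch that accumulates per-channel statistics over the loader, assuming each batch is a tuple whose first element is an image tensor shaped (N, C, H, W):

def compute_mean_std(loader):
    """Hypothetical helper: running per-channel mean/std over a DataLoader."""
    n_pixels = 0
    channel_sum = channel_sq_sum = 0.0
    for batch in loader:
        images = batch[0]                                         # (N, C, H, W)
        x = images.permute(1, 0, 2, 3).reshape(images.size(1), -1)
        channel_sum = channel_sum + x.sum(dim=1)
        channel_sq_sum = channel_sq_sum + (x ** 2).sum(dim=1)
        n_pixels += x.size(1)
    mean = channel_sum / n_pixels
    std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
    print('mean:', mean, 'std:', std)
    return mean, std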
Example #10
    def fit(self, x, y, validation_data, epochs=30, batch_size=4, callbacks=[]):
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        imgsz = self.job.model.get_target_size()

        my_getmetric = callbacks[0]
        my_savebestweight = callbacks[1]

        trainpack = (x, y)
        validpack = validation_data

        train_dataset = CustomDataset(trainpack, imgsz=imgsz)
        valid_dataset = CustomDataset(validpack, imgsz=imgsz)

        total_train_iter = math.ceil(len(x) / batch_size)
        total_valid_iter = math.ceil(len(validation_data[0]) / batch_size)
        # print(train_dataset)

        trainloader = DataLoader(train_dataset,
                                 batch_size=batch_size, 
                                 num_workers=4, 
                                 shuffle=False,
                                 pin_memory=True)

        validloader = DataLoader(valid_dataset, 
                                 # batch_size=int(batch_size / 2), 
                                 batch_size=batch_size,
                                 num_workers=4, 
                                 shuffle=False,
                                 pin_memory=True)

        optimizer = optim.SGD(self.parameters(), lr=1e-4, momentum=0.9)
        self = self.to(device)
        
        for epoch in range(0, epochs):
            start = time.time()
            train_loss, train_acc = 0, 0
            self.train()
            # Training Part
            print(f"[Epoch {epoch + 1}/{epochs}] Start")
            for i, (img, label) in enumerate(trainloader):
                optimizer.zero_grad()
                img = img.to(device)  # Variable is deprecated; tensors work directly
                label = label.to(device, dtype=torch.int64)

                out = self(img)
                acc = (torch.max(out, 1)[1].cpu().numpy() == torch.max(label, 1)[1].cpu().numpy())
                acc = float(np.count_nonzero(acc) / len(img))  # len(img), not batch_size: the last batch may be smaller
                loss = self.criterion(out, torch.max(label, 1)[1])
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                train_acc += acc
                print("[train %s/%3s] Epoch: %3s | Time: %6.2fs | loss: %6.4f | Acc: %g" % (
                        i + 1, total_train_iter, epoch + 1, time.time() - start, round(loss.item(), 4), float(acc)))

            train_loss = train_loss / total_train_iter
            train_acc = train_acc / total_train_iter
            print("[Epoch {} training Ended] > Time: {:.2}s/epoch | Loss: {:.4f} | Acc: {:g}\n".format(
                epoch + 1, time.time() - start, np.mean(train_loss), train_acc))

            val_loss, val_acc = self.evaluate(model=self, dataloader=validloader, valid_iter=total_valid_iter, batch_size=batch_size);

            # (dc) GetMetricCallback ==================================================================================
            my_getmetric.save_status(epoch + 1, 
                                     metrics=[[round(train_loss, 4), round(train_acc, 4)],   # train metric
                                              [round(val_loss, 4), round(val_acc, 4)]],      # validation metric
                                     metrics_name=[['loss', 'acc'],                 # train metric name
                                                   ['loss', 'acc']]                 # validation metric name
                                    )   
            # =========================================================================================================

            # (dc) SaveBestWeightCallback =============================================================================
            my_savebestweight.save_best_weight(self, model_metric=val_loss, compared='less')
Example #11
def train():
    load_to_mem_train = True

    features_stats = {"mean": 86, "std": 22}
    batch_size = 512
    train_batch_size = batch_size
    validation_batch_size = train_batch_size

    path_to_train_data = "./data/external/train/"
    path_to_targets_train = "./data/external/train.csv"
    path_to_val_data = "./data/external/val/"
    path_to_targets_val = "./data/external/val.csv"
    path_to_save_model = "./models/model.pt"

    n_epochs = 2
    es_rounds = 35
    lr = 0.001

    backbone_output_dim = 1024
    backbone = VGGNet()
    model = Supervised1dModel(backbone=backbone,
                              backbone_output_dim=backbone_output_dim,
                              num_classes=3)

    n_workers = os.cpu_count()

    # define train, val and test datasets
    train_dataset = CustomDataset(path_to_data=path_to_train_data,
                                  files_to_use=None,
                                  load_data_to_mem=load_to_mem_train,
                                  path_to_targets=path_to_targets_train,
                                  features_stats=features_stats,
                                  mode="train")

    val_dataset = CustomDataset(path_to_data=path_to_val_data,
                                files_to_use=None,
                                load_data_to_mem=load_to_mem_train,
                                path_to_targets=path_to_targets_val,
                                features_stats=features_stats,
                                mode="val")

    # define train, val and test loaders
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=train_batch_size,
                                               collate_fn=collate_fn,
                                               num_workers=n_workers,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=validation_batch_size,
                                             collate_fn=collate_fn,
                                             num_workers=n_workers,
                                             shuffle=False)

    # train
    runner = dl.SupervisedRunner()
    criterion = torch.nn.CrossEntropyLoss()
    callbacks = [
        dl.F1ScoreCallback(),
        dl.EarlyStoppingCallback(patience=es_rounds, minimize=True)
    ]

    print("\n\n")
    print("Main training")
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=25, verbose=True)
    runner.train(model=model,
                 criterion=criterion,
                 optimizer=optimizer,
                 scheduler=scheduler,
                 loaders={
                     "train": train_loader,
                     "valid": val_loader
                 },
                 num_epochs=n_epochs,
                 callbacks=callbacks,
                 logdir="./logdir/",
                 load_best_on_end=True,
                 main_metric="f1_score",
                 minimize_metric=False,
                 fp16=True,
                 verbose=True)

    # save trained model
    torch.save(model, path_to_save_model)
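
collate_fn is passed to both loaders but not defined in this snippet. For variable-length 1-D audio a padding collate is typical; a sketch, assuming each dataset item is a (signal, label) pair:

def collate_fn(batch):
    """Hypothetical collate: zero-pad 1-D signals to the longest in the batch."""
    signals, labels = zip(*batch)
    max_len = max(s.shape[-1] for s in signals)
    padded = torch.zeros(len(signals), max_len)
    for i, s in enumerate(signals):
        s = torch.as_tensor(s, dtype=torch.float32)
        padded[i, :s.shape[-1]] = s
    return padded, torch.as_tensor(labels, dtype=torch.int64)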
Example #12
        if cuda:
            images = images.cuda()
        embeddings[k:k + len(images)] = model.get_embedding(
            images).data.cpu().numpy()
        labels[k:k + len(images)] = target.numpy()
        k += len(images)
    return embeddings, labels


normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

train_dataset = CustomDataset(
    csv_file='data.csv',
    root_dir=
    'C:\\Users\\alex\\Desktop\\pytourch\\siamese-triplet-master\\images\\',
    transform=transforms.Compose([
        transforms.Resize(299),  # transforms.Scale was renamed to Resize in torchvision
        transforms.ToTensor()
        # normalize
        # transforms.Normalize((mean,), (std,))
    ]))

test_dataset = CustomDataset(
    csv_file='data.csv',
    root_dir=
    'C:\\Users\\alex\\Desktop\\pytourch\\siamese-triplet-master\\images\\',
    transform=transforms.Compose([
        transforms.Resize(299),
        transforms.ToTensor()
        # normalize
        # transforms.Normalize((mean,), (std,))
    ]))
Example #13
def train():
    cuda = torch.cuda.is_available()

    batch_size = 32

    # Import Model
    net = SRresnet()
    net.train()
    discriminator = Discriminator()

    # DataLoader
    dataset = CustomDataset()
    customImageLoader = DataLoader(
        dataset, batch_size=batch_size, shuffle=True)

    epochs = 10

    # Criterion, Optimizer, Scheduler
    G_optimizer = optim.Adam(net.parameters(), lr=0.001)
    D_optimizer = optim.Adam(discriminator.parameters(), lr=0.001)

    criterion = nn.L1Loss()

    scheduler = optim.lr_scheduler.StepLR(
        optimizer=G_optimizer, step_size=1, gamma=0.1)

    if cuda:
        net.cuda()
        discriminator.cuda()
        criterion.cuda()

    device = torch.device('cuda' if cuda else 'cpu')

    for epoch in range(epochs):
        for i, imgs in enumerate(customImageLoader):
            # Variable and the Tensor-type cast are deprecated; move tensors to the device directly
            hr_img = imgs['hr'].to(device)
            lr_img = imgs['lr'].to(device)

            # Adversarial ground truths
            valid = torch.ones(lr_img.size(0), 1, device=device)
            fake = torch.zeros(lr_img.size(0), 1, device=device)

            # Train Generator
            gen_hr = net(lr_img)
            loss = criterion(discriminator(gen_hr), valid)

            G_optimizer.zero_grad()
            loss.backward()
            G_optimizer.step()

            # Train Discriminator
            D_optimizer.zero_grad()
            loss_real = criterion(discriminator(hr_img), valid)
            loss_fake = criterion(discriminator(gen_hr.detach()), fake)

            loss_D = (loss_real + loss_fake) / 2

            loss_D.backward()
            D_optimizer.step()

            if i % 100 == 0:
                print("==> Epoch [{}] ({} / {}) : Loss : {:.5}".format(
                    epoch, i, len(customImageLoader), loss.item()))
                print("==> Epoch [{}] ({} / {}) : Loss_D : {:.5}".format(
                    epoch, i, len(customImageLoader), loss_D.item()))
        scheduler.step()
        save_checkpoint(net, epoch, G_optimizer, D_optimizer, criterion)


def main():
    custom = CustomDataset('D:/dataset/tiny/')
    name_c = custom.label_name
    num_class = custom.num_label

    data_loader = DataLoader(custom,
                             batch_size=batch_size * GD_ratio,
                             shuffle=True,
                             drop_last=True)

    generator = mm.Generator(n_noise, num_class).cuda()
    discriminator = mm.Discriminator(num_class).cuda()

    optim_disc = optim.Adam(discriminator.parameters(),
                            lr=0.0002,
                            betas=(0.0, 0.9))
    optim_gen = optim.Adam(generator.parameters(), lr=0.0002, betas=(0.0, 0.9))

    if restore:
        print('Weight Restoring.....')
        generator.load_state_dict(torch.load(Checkpoint)['gen'])
        discriminator.load_state_dict(torch.load(Checkpoint)['dis'])
        optim_gen.load_state_dict(torch.load(Checkpoint)['opt_gen'])
        optim_disc.load_state_dict(torch.load(Checkpoint)['opt_dis'])
        torch.cuda.empty_cache()
        print('Weight Restoring Finish!')

    print('Training start')
    is_training = True
    iter_count = restore_point
    start_time = time.time()
    for e in range(100000):
        if not is_training:
            break
        for step, (img_real, class_img) in enumerate(data_loader):
            D_loss = 0

            for gd in range(GD_ratio):
                with torch.no_grad():
                    img_gen = generator(
                        torch.randn(batch_size, n_noise).cuda(),
                        class_img[gd::GD_ratio].cuda())

                dis_fake = discriminator(img_gen,
                                         class_img[gd::GD_ratio].cuda())
                dis_real = discriminator(img_real[gd::GD_ratio].cuda(),
                                         class_img[gd::GD_ratio].cuda())

                D_loss = torch.mean(torch.relu(1. - dis_real)) + torch.mean(
                    torch.relu(1. + dis_fake))
                optim_disc.zero_grad()
                D_loss.backward()
                optim_disc.step()

            img_gen = generator(
                torch.randn(batch_size, n_noise).cuda(),
                class_img[0::GD_ratio].cuda())
            dis_fake = discriminator(img_gen, class_img[0::GD_ratio].cuda())
            dis_real = None

            G_loss = -torch.mean(dis_fake)
            optim_disc.zero_grad()  # clear grads the generator pass leaves on the discriminator
            optim_gen.zero_grad()
            G_loss.backward()
            optim_gen.step()
            iter_count += 1

            if iter_count % 100 == 0:
                consume_time = time.time() - start_time
                print(
                    '%d\t\tEpoch : %d\t\tLoss_D = %.3f\t\tLoss_G = %.3f\t\ttime = %.4f'
                    % (iter_count, e, D_loss.item(), G_loss.item(),
                       consume_time))
                start_time = time.time()

            if iter_count % saving_iter == 0:

                print('SAVING MODEL')
                Temp = model_path + '/cVG iter %s/' % iter_count

                if not os.path.exists(Temp):
                    os.makedirs(Temp)

                SaveName = Temp + 'Train_%s.pth' % iter_count
                torch.save(
                    {
                        'gen': generator.state_dict(),
                        'dis': discriminator.state_dict(),
                        'opt_gen': optim_gen.state_dict(),
                        'opt_dis': optim_disc.state_dict(),
                    }, SaveName)

                save_images(generator,
                            n_noise,
                            num_class,
                            name_c,
                            save_path=save_path + '/img/')

                print('SAVING MODEL Finish')

            if iter_count % (saving_iter * 10) == 0:

                print('Evaluation start')

                fid_score, is_score = evaluate(generator,
                                               n_noise,
                                               num_class,
                                               name_c,
                                               custom,
                                               num_img=50000,
                                               save_img=False)

                with open(save_path + '/log_FID.txt', 'a+') as f:
                    data = 'itr : %05d\t%.3f\n' % (iter_count, fid_score)
                    f.write(data)
                with open(save_path + '/log_IS.txt', 'a+') as f:
                    data = 'itr : %05d\t%.3f\t%.3f\n' % (
                        iter_count, is_score[0], is_score[1])
                    f.write(data)

                print('Evaluation Finish')

            if iter_count == Max_iter:
                is_training = False
                break
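
evaluate (called every saving_iter * 10 steps above) is not shown. Mirroring the fid / inception_score calls from Example #3, a sketch might look like this:

def evaluate(generator, n_noise, num_class, name_c, custom,
             num_img=50000, save_img=False):
    """Hypothetical evaluation: score generated images against real ones."""
    fake_images = gen_images(generator, n_noise, num_class, name_c,
                             num_img=num_img, save_img=save_img)
    real_images = next(iter(DataLoader(custom, batch_size=num_img)))[0].cpu().numpy()
    fid_score = fid.calculate_fid(fake_images, real_images, batch_size=10)
    is_score = inception_score.inception_score(fake_images, batch_size=10,
                                               splits=10)
    return fid_score, is_score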