Example #1
    def __init__(self, trainingfile, testfile, outputfile):
        loader = DataLoader()
        loader.loadTrainingData(trainingfile)

        self.features_names = loader.featureNames()
        self.images, self.IDs = loader.loadTestData(testfile)

        self.df_output = pandas.read_csv(outputfile).drop(['Unnamed: 0'], axis = 1)
        pyplot.subplot()
Example #2
File: main.py Project: Attil/WEDT
def main(args):
    dl = DataLoader()
    stem = Stemmer('porter')

    # files is a list of dicts, one per input file, mapping each entry's key to its stemmed text (entries whose stem is empty are dropped)
    files = [{element[0]: stem.stem(element[1]) for element in dl.load_data(file) if stem.stem(element[1])} for file in args]

    for file, arg in zip(files, args):
        print('Processing file {}...'.format(arg))
        file = {k: list(v) for k, v in file.items()}

        print('Data Clusterer')
        test_clusterer(DataClusterer(list(file.values()), 'euclidean'), file)

        print('-'*64)

        print('Description Clusterer')
        test_clusterer(DescriptionClusterer(list(file.values()), 'cosine'), file)
Example #3
import json

base_dir = '../data/'
dataset_name = sys.argv[1]

#Dataset
if dataset_name == 'bn1':
    dataset = pd.read_csv(base_dir + dataset_name + '.csv')
    dataset.drop(['Unnamed: 0'], axis=1, inplace=True)

    params = {
        'dataframe': dataset.copy(),
        'continuous_features': ['x1', 'x2', 'x3'],
        'outcome_name': 'y'
    }
    d = DataLoader(params)
    train_data_vae = d.data_df.copy()
    #train_data_vae.drop('y', axis=1, inplace=True)
    columns = train_data_vae.columns

elif dataset_name == 'adult':
    dataset = load_adult_income_dataset()

    params = {
        'dataframe': dataset.copy(),
        'continuous_features': ['age', 'hours_per_week'],
        'outcome_name': 'income'
    }
    d = DataLoader(params)
    # d = dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')
    train_data_vae = d.data_df.copy()
Example #4
    else:
        print(" >> Using Cpu")
        params.device = torch.device('cpu')

    random.seed(args.seed)
    torch.manual_seed(args.seed)

    params.seed = args.seed
    params.tag = args.tag
    params.save_dir = args.save_dir
    params.batch_size = args.batch_size
    params.epoch_num = args.num_epoch
    params.save_freq = args.save_freq
    params.save_checkpoints = args.save_checkpoints

    dataloader = DataLoader(path_to_data=args.train_data, seed=params.seed, shuffle=True)

    params.lr = args.lr
    params.max_grad_norm = 1.0
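    # Total number of optimizer steps over the whole run: batches per epoch times number of epochs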
    params.num_total_steps = (dataloader.size()[0] // params.batch_size) * params.epoch_num
    params.num_warmup_steps = args.warmup_steps

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    dataloader.pre_encode(tokenizer)

    #model = DistilBertForTokenClassification(2, args.top_rnn) if args.distil else BertForTokenClassification.from_pretrained('bert-base-uncased', num_labels=2)
    #model = BertForTokenClassification.from_pretrained('./temp/pytorch_model.bin', num_labels=2)
    if args.restore_file is not None:
        model = BertForTokenClassification.from_pretrained(args.restore_file, num_labels=2)
    else:
        model = BertForTokenClassification.from_pretrained('bert-base-uncased', num_labels=2)
Example #5
def main():
    set_random_seed(0)

    parser = argparse.ArgumentParser()
    parser.add_argument('--exp',
                        type=str,
                        default='GenericFastAdapt',
                        help='config file with parameters of the experiment')
    parser.add_argument('--num_workers',
                        type=int,
                        default=4,
                        help='number of data loading workers')
    parser.add_argument('--cuda', type=bool, default=True, help='enables cuda')
    parser.add_argument(
        '--imagedir',
        type=str,
        default='',
        help='path to image directory containing client images')
    parser.add_argument('--experts_dir',
                        type=str,
                        default='',
                        help='path to directory containing experts')
    args_opt = parser.parse_args()

    exp_config_file = os.path.join('.', 'config', args_opt.exp + '.py')
    exp_directory = os.path.join(
        '.', 'experiments',
        args_opt.exp + '-' + args_opt.experts_dir.replace('/', '-'))

    # Load the configuration params of the experiment
    print('Launching experiment: %s' % exp_config_file)
    config = imp.load_source("", exp_config_file).config
    config['exp_dir'] = exp_directory  # the place where logs, models, and other stuff will be stored

    config['image_directory'] = args_opt.imagedir

    data_test_opt = config['data_test_opt']
    dataset_test = GenericDataset(dataset_name=data_test_opt['dataset_name'],
                                  split=data_test_opt['split'],
                                  config=config)

    dloader_test = DataLoader(dataset=dataset_test,
                              batch_size=data_test_opt['batch_size'],
                              num_workers=args_opt.num_workers,
                              shuffle=False)

    z = {}
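    # z collects eval_stats['prec1'] for each expert checkpoint evaluated on the test loader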
    algorithm = ClassificationModel(config)

    for expert_fname in os.listdir(args_opt.experts_dir):
        expert_path = os.path.join(args_opt.experts_dir, expert_fname)
        if not os.path.isfile(expert_path):
            continue
        algorithm.init_network()
        algorithm.load_pretrained(expert_path)
        if args_opt.cuda:
            algorithm.load_to_gpu()
        eval_stats = algorithm.evaluate(dloader_test)
        z[expert_fname] = eval_stats['prec1']

    print(z)
    with open(os.path.join(exp_directory, 'z.pickle'), 'wb') as f:
        pickle.dump(z, f, protocol=pickle.HIGHEST_PROTOCOL)
Example #6
#Main Code
base_data_dir = '../data/'
base_model_dir = '../models/'
dataset_name = args.dataset_name

#Dataset
if dataset_name == 'bn1':
    dataset = pd.read_csv(base_data_dir + dataset_name + '.csv')
    dataset.drop(['Unnamed: 0'], axis=1, inplace=True)
    params = {
        'dataframe': dataset.copy(),
        'continuous_features': ['x1', 'x2', 'x3'],
        'outcome_name': 'y'
    }
    d = DataLoader(params)

elif dataset_name == 'adult':
    dataset = load_adult_income_dataset()
    params = {
        'dataframe': dataset.copy(),
        'continuous_features': ['age', 'hours_per_week'],
        'outcome_name': 'income'
    }
    d = DataLoader(params)

#Load Black Box Prediction Model
data_size = len(d.encoded_feature_names)
pred_model = BlackBox(data_size)
path = base_model_dir + dataset_name + '.pth'
pred_model.load_state_dict(torch.load(path))
Example #7
import cv2

from dataloader import DataLoader
from harrisdetector import harris_corners
from pointTracker import PointTracker
from pointProjection import project_points
import time
from debug.PointsVisualizer import PointVisualizer

# This path will not work when this is uploaded to git; the dataset is not uploaded.
#rgbd_dataset_freiburg1_rpy
#rgbd_dataset_freiburg1_xyz
#rgbd_dataset_freiburg1_rgb_calibration
dl = DataLoader('dataset/rgbd_dataset_freiburg1_rpy')  # Edit this string to load a different dataset

tracker = PointTracker()
vis = PointVisualizer()

# Set initial position of cameras in visualizer
initial_orientation, initial_position = dl.get_transform()
vis.set_groundtruth_transform(initial_orientation, initial_position)
vis.set_estimated_transform(initial_orientation, initial_position)

# Get points for the first frame
grey_img = dl.get_greyscale()
depth_img = dl.get_depth()
points_and_response = harris_corners(grey_img)
tracker.add_new_corners(grey_img, points_and_response)

# Project the points in the first frame
Example #8
def evaluate(im0_path,
             restore_path,
             output_path,
             n_batch=settings.N_BATCH,
             n_height=settings.N_HEIGHT,
             n_width=settings.N_WIDTH,
             n_channel=settings.N_CHANNEL,
             n_pyramid=settings.N_PYRAMID,
             max_disparity=settings.MAX_DISPARITY,
             n_gpu=settings.N_GPU,
             n_thread=settings.N_THREAD):
    """Test function."""
    # Create dataloader for computation graph
    dataloader = DataLoader(shape=[n_batch, n_height, n_width, n_channel],
                            name='dataloader',
                            n_thread=n_thread,
                            prefetch_size=n_thread,
                            normalize=True,
                            random_flip=False,
                            random_gamma=False,
                            gamma_range=[0.8, 1.2],
                            random_brightness=False,
                            brightness_range=[0.5, 2.0],
                            random_color=False,
                            color_range=[0.8, 1.2])
    # Build model
    model = MonoDispNet(dataloader.next_element[0],
                        dataloader.next_element[1],
                        n_pyramid=n_pyramid,
                        max_disparity=max_disparity)
    # Start a Tensorflow session
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    # Initialize saver that will be used for restore
    train_saver = tf.train.Saver()
    # Initialize all variables
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    # Restore weights from checkpoint
    log('Restoring from: %s' % restore_path)
    train_saver.restore(session, restore_path)
    # Load the files for evaluation
    im0_paths = data_utils.read_paths(im0_path)
    n_sample = len(im0_paths)
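    # Pad the path list so it splits evenly into batches; the extra predictions are trimmed after inference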
    im0_paths = data_utils.pad_batch(im0_paths, n_batch)
    n_step = len(im0_paths) // n_batch
    log('Evaluating %d files...' % n_sample)
    dataloader.initialize(session,
                          im0_paths=im0_paths,
                          im1_paths=im0_paths,
                          augment=False)

    d_arr = np.zeros((n_step * n_batch, n_height, n_width), dtype=np.float32)
    start_time = time.time()
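    # Run the network batch by batch, keeping the first channel of each disparity prediction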
    for step in range(n_step):
        batch_start = step * n_batch
        batch_end = step * n_batch + n_batch
        d = session.run(model.model0[0])
        d_arr[batch_start:batch_end, :, :] = d[:, :, :, 0]
    end_time = time.time()
    log('Total time: %.1f ms  Average time per image: %.1f ms' %
        (1000 * (end_time - start_time),
         (1000 * (end_time - start_time) / n_sample)))
    d_arr = d_arr[0:n_sample, :, :]
    output_path = os.path.join(output_path, 'disparities.npy')
    log('Storing predictions to %s' % output_path)
    if not os.path.exists(os.path.dirname(output_path)):
        os.makedirs(os.path.dirname(output_path))
    np.save(output_path, d_arr)
Example #9
# read config file
with open('./configs/train_params.yaml') as config_file:
    config = yaml.load(config_file, Loader=yaml.FullLoader)

config = Dict(config)
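# Dict wraps the parsed YAML so settings can be read as attributes (config.batch_size, config.epochs below)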

train_df = pd.read_csv('../data/train_dataset.csv')
test_df = pd.read_csv('../data/test_dataset.csv')

train_dataset = Dataset(df = train_df,
                        image_folder_path='../data/images',
                        augmentations=get_training_augmentation())

test_dataset = Dataset(df = test_df,
                        image_folder_path='../data/images',
                        augmentations=get_test_augmentation())


train_dataloader = DataLoader(dataset=train_dataset, batch_size=config.batch_size, shuffle=False)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=config.batch_size, shuffle=False)

model = get_model()

optimizer = tf.keras.optimizers.Adam(learning_rate = config.learning_rate)
loss = tf.keras.losses.CategoricalCrossentropy()

model.compile(optimizer=optimizer, loss = loss, metrics=['accuracy'])

model.fit(x = train_dataloader, steps_per_epoch=len(train_dataloader), epochs=config.epochs,
          validation_data = test_dataloader, validation_steps=len(test_dataloader))
Example #10
def run(final_file, kaoqin_dir, jixiao_dir, fenc='utf8'):
    dl = DataLoader()
    dl.load(kaoqin_dir, jixiao_dir, final_file, fenc)
    dl.check()
Example #11
def main():
    """ Train the Yolov3 Model """

    # If a checkpoint already exists, assign checkpoint = checkpoint_file
    checkpoint = None

    # Set epochs, load the data and the trainable model
    start_epoch = 0
    end_epoch = 7000
    learning_rate = 1e-3
    batch_size = 6

    model = DarkNet()
    data = DataLoader(416, "data/train")
    dataloader = torch.utils.data.DataLoader(dataset=data,
                                             batch_size=batch_size,
                                             num_workers=0,
                                             shuffle=True)
    model = model.to("cuda")
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # If there's a checkpoint, load its values
    if checkpoint is not None:
        state = torch.load(checkpoint)
        model.load_state_dict(state['state_dict'])
        optimizer.load_state_dict(state['optimizer'])
        start_epoch = state['epoch']

    for param in model.parameters():
        param.requires_grad = True
    count = 0
    x_y = []
    w_h = []
    conf_loss = []
    final_loss = []

    # Train the model
    print("Starting Training..")

    for epoch in range(start_epoch, end_epoch):
        print(
            "------------------------------------------------------------------------------------------------------------"
        )
        for batch_id, (imgs, target) in enumerate(dataloader):
            imgs = imgs.cuda()
            target = target.cuda()
            optimizer.zero_grad()
            loss = model(imgs, target)
            loss.backward()
            optimizer.step()
            if batch_id % 10 == 0:
                print(
                    "Epoch %d/%d || Batch %d || Overall Loss %.2f || X-Loss %.2f || Y-Loss %.2f || W-Loss %.2f || H-Loss %.2f"
                    %
                    (epoch, end_epoch, batch_id, loss.item(), model.losses[0],
                     model.losses[1], model.losses[2], model.losses[3]))
        x_y.append(model.losses[0] + model.losses[1])
        w_h.append(model.losses[2] + model.losses[3])
        conf_loss.append(model.losses[4])
        final_loss.append(loss.item())

    # Plot the graph to check if the loss is decreasing through the epochs

    # X-Y Loss
    plt.plot(x_y, label='X and Y')
    plt.savefig('x-y-loss.png')
    plt.close()

    # W-H Loss
    plt.plot(w_h, label='W and H')
    plt.savefig('w-h-loss.png')
    plt.close()

    # Confidence Loss
    plt.plot(conf_loss, label='Conf')
    plt.savefig('conf-loss.png')
    plt.close()

    # Overall Loss
    plt.plot(final_loss, label='Loss')
    plt.savefig('final-loss.png')
    plt.show()
    plt.close()

    # Save the model as checkpoint
    torch.save(
        {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }, 'checkpoints/checkpoint.epoch.{}.pth.tar'.format(epoch))
Example #12

dataset_train = GenericDataset(
    dataset_name=data_train_opt['dataset_name'],
    split=data_train_opt['split'],
    random_sized_crop=data_train_opt['random_sized_crop'])
dataset_test = GenericDataset(
    dataset_name=data_test_opt['dataset_name'],
    split=data_test_opt['split'],
    random_sized_crop=data_test_opt['random_sized_crop'])

dloader_train = DataLoader(
    dataset=dataset_train,
    batch_size=data_train_opt['batch_size'],
    unsupervised=data_train_opt['unsupervised'],
    epoch_size=data_train_opt['epoch_size'],
    num_workers=args_opt.num_workers,
    fillcolor=(128,128,128),
    resample=PIL.Image.BILINEAR,
    shuffle=True)

dloader_test = DataLoader(
    dataset=dataset_test,
    batch_size=data_test_opt['batch_size'],
    unsupervised=data_test_opt['unsupervised'],
    epoch_size=data_test_opt['epoch_size'],
    num_workers=args_opt.num_workers,
    fillcolor=(128,128,128),
    resample=PIL.Image.BILINEAR,
    shuffle=False)

Example #13
dataset_train = GenericDataset(
    dataset_name=data_train_opt['dataset_name'],
    split=data_train_opt['split'],
    random_sized_crop=data_train_opt['random_sized_crop'],
    num_imgs_per_cat=num_imgs_per_cat)
dataset_test = GenericDataset(
    dataset_name=data_test_opt['dataset_name'],
    split=data_test_opt['split'],
    random_sized_crop=data_test_opt['random_sized_crop'])

dloader_train = DataLoader(
    dataset=dataset_train,
    batch_size=data_train_opt['batch_size'],
    unsupervised=data_train_opt['unsupervised'],
    epoch_size=data_train_opt['epoch_size'],
    num_workers=args_opt.num_workers,
    shuffle=True)

dloader_test = DataLoader(
    dataset=dataset_test,
    batch_size=data_test_opt['batch_size'],
    unsupervised=data_test_opt['unsupervised'],
    epoch_size=data_test_opt['epoch_size'],
    num_workers=args_opt.num_workers,
    shuffle=False)

config['disp_step'] = args_opt.disp_step
algorithm = getattr(alg, config['algorithm_type'])(config)
if args_opt.cuda: # enable cuda
Example #14
    if args.train:

        #---create datasets---#
        dataset = Dataset(args.dataset_path,
                          args.index_path,
                          seg_len=hps.seg_len)
        sourceset = Dataset(args.dataset_path,
                            args.index_source_path,
                            seg_len=hps.seg_len)
        targetset = Dataset(args.dataset_path,
                            args.index_target_path,
                            seg_len=hps.seg_len)

        #---create data loaders---#
        data_loader = DataLoader(dataset, hps.batch_size)
        source_loader = DataLoader(sourceset, hps.batch_size)
        target_loader = DataLoader(targetset, hps.batch_size)

        #---handle paths---#
        os.makedirs(args.ckpt_dir, exist_ok=True)
        model_path = os.path.join(args.ckpt_dir, args.model_name)

        #---initialize trainer---#
        trainer = Trainer(hps, data_loader, args.targeted_G, args.one_hot,
                          args.binary_output, args.binary_ver)
        if args.load_model:
            trainer.load_model(os.path.join(args.ckpt_dir,
                                            args.load_train_model_name),
                               model_all=False)
Example #15
def train(model_config,
          data_config,
          output_path,
          device,
          epoch_size,
          max_epoch,
          batch_size,
          repeats,
          decade_rate,
          clip_grad,
          log_every,
          valid_every,
          learning_rate=0.0005):
    print('use device: %s' % device, file=sys.stderr)
    vocab = Vocab.load(data_config["vacab_file"])
    model = NMT(vocab=vocab, **model_config)
    model = model.to(torch.device(device))
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    data_config.pop("vacab_file", None)
    data_loader = DataLoader(**data_config)
    batch_queue, loss_queue = data_loader.load_train_data(
        epoch_size, max_epoch, batch_size, repeats, decade_rate)
    dev_data = data_loader.load_dev_data()

    hist_valid_scores = []
    train_losses = []
    train_iter = cum_loss = report_loss = cum_tgt_words = report_tgt_words = 0
    cum_examples = report_examples = epoch = valid_num = 0

    if os.path.isfile(output_path + '/speech-to-text.model'):
        print('loading saved model...')
        params = torch.load(output_path + '/speech-to-text.model',
                            map_location=lambda storage, loc: storage)
        model.load_state_dict(params['state_dict'])
        print('restoring parameters of the optimizers', file=sys.stderr)
        optimizer.load_state_dict(
            torch.load(output_path + '/speech-to-text.optim'))
        dev_ppl = evaluate_ppl(
            model, dev_data,
            batch_size=128)  # dev batch size can be a bit larger
        valid_metric = -dev_ppl
        hist_valid_scores.append(valid_metric)
        print("saved model ppl: ", dev_ppl)

    model.train()

    train_time = begin_time = time.time()
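    # Consume batches from the loader's queue until it yields None to signal the end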
    epoch, voices, tgt_sents = batch_queue.get(True)
    while voices is not None and tgt_sents is not None:
        train_iter += 1
        optimizer.zero_grad()
        # print("received voices:", len(voices))
        # print("tgt_sents[0]:", len(tgt_sents[0]), tgt_sents[0])
        # print("tgt_sents[1]:", len(tgt_sents[1]), tgt_sents[1])
        batch_size = len(voices)
        sample_losses = -model(voices, tgt_sents)
        batch_loss = sample_losses.sum()
        loss = batch_loss / batch_size
        loss.backward()

        # clip gradient
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   clip_grad)
        optimizer.step()

        batch_losses_val = batch_loss.item()
        report_loss += batch_losses_val
        cum_loss += batch_losses_val

        tgt_words_num_to_predict = sum(
            len(s[1:]) for s in tgt_sents)  # omitting leading `<s>`
        report_tgt_words += tgt_words_num_to_predict
        cum_tgt_words += tgt_words_num_to_predict
        report_examples += batch_size
        cum_examples += batch_size
        loss_queue.put(report_loss / report_examples)
        train_losses.append({
            'epoch': epoch,
            'iter': train_iter,
            'loss': report_loss / report_examples,
            'ppl': math.exp(report_loss / report_tgt_words),
            'cum': cum_examples,
            'speed': report_tgt_words / (time.time() - train_time)
        })

        if train_iter % log_every == 0:
            print(
                'epoch %d, iter %d, avg. loss %.2f, avg. ppl %.2f '
                'cum. examples %d, speed %.2f words/sec, time elapsed %.2f sec'
                % (epoch, train_iter, report_loss / report_examples,
                   math.exp(report_loss / report_tgt_words), cum_examples,
                   report_tgt_words /
                   (time.time() - train_time), time.time() - begin_time),
                file=sys.stderr)

            train_time = time.time()
            report_loss = report_tgt_words = report_examples = 0.
        # perform validation
        if train_iter % valid_every == 0:
            print(
                'epoch %d, iter %d, cum. loss %.2f, cum. ppl %.2f cum. examples %d'
                % (epoch, train_iter, cum_loss / cum_examples,
                   np.exp(cum_loss / cum_tgt_words), cum_examples),
                file=sys.stderr)

            cum_loss = cum_examples = cum_tgt_words = 0.
            valid_num += 1

            print('begin validation ...', file=sys.stderr)

            # compute dev. ppl and bleu
            dev_ppl = evaluate_ppl(
                model, dev_data,
                batch_size=128)  # dev batch size can be a bit larger
            valid_metric = -dev_ppl

            print('validation: iter %d, dev. ppl %f' % (train_iter, dev_ppl),
                  file=sys.stderr)

            is_better = len(hist_valid_scores) == 0 or valid_metric > max(hist_valid_scores)
            hist_valid_scores.append(valid_metric)

            if is_better:
                patience = 0
                print('save currently the best model to [%s]' % output_path,
                      file=sys.stderr)
                model.save(output_path + '/speech-to-text.model')
                torch.save(optimizer.state_dict(),
                           output_path + '/speech-to-text.optim')

        epoch, voices, tgt_sents = batch_queue.get(True)
Example #16
File: util.py Project: amsqr/hd
def submit(ypred):
    from dataloader import DataLoader
    dataloader = DataLoader(config) 
    df_test = dataloader.load_test_data()
    id_test = df_test['id']
    pd.DataFrame({"id": id_test, "relevance": ypred}).to_csv('submission.csv',index=False)
Example #17
    def __init__(self,
                 dataset_name="celeba",
                 img_size=28,
                 channels=3,
                 backup_dir="backup"):

        self.dataset_name = dataset_name
        self.img_size = img_size
        self.channels = channels

        self.backup_dir = backup_dir
        self.time = time()

        # Input shape
        self.latent_dim = 100
        self.learning_rate = 1e-3

        self.gf = 64  # filter size of generator's last layer
        self.df = 64  # filter size of discriminator's first layer

        optimizer_disc = Adam(self.learning_rate / 10,
                              beta_1=0.5,
                              decay=0.00005)
        optimizer_gen = Adam(self.learning_rate, beta_1=0.5, decay=0.00005)

        # Configure data loader
        self.dl = DataLoader(dataset_name=self.dataset_name,
                             img_res=(self.img_size, self.img_size),
                             mem_load=True)
        self.n_data = self.dl.get_n_data()

        # Build generator
        self.generator = self.build_generator()
        print(
            "---------------------generator summary----------------------------"
        )
        self.generator.summary()

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        print(
            "\n---------------------discriminator summary----------------------------"
        )
        self.discriminator.summary()

        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer_disc,
                                   metrics=['accuracy'])

        z = Input(shape=(self.latent_dim, ))
        fake_img = self.generator(z)

        # for the combined model, we only train the generator
        self.discriminator.trainable = False

        validity = self.discriminator(fake_img)

        # Build combined model
        self.combined = Model(z, validity)
        print(
            "\n---------------------combined summary----------------------------"
        )
        self.combined.summary()
        self.combined.compile(loss=['binary_crossentropy'],
                              optimizer=optimizer_gen)
Example #18
        sys.stdout.flush()

        gt, vm = data_utils.load_depth_with_validity_map(gt_paths[idx])
        gt = np.concatenate(
            [np.expand_dims(gt, axis=-1),
             np.expand_dims(vm, axis=-1)],
            axis=-1)
        gt_arr.append(gt)

    print('Completed loading {} groundtruth depth maps'.format(n_sample))

with tf.Graph().as_default():
    # Initialize dataloader
    dataloader = DataLoader(
        shape=[args.n_batch, args.n_height, args.n_width, 3],
        name='dataloader',
        is_training=False,
        n_thread=args.n_thread,
        prefetch_size=2 * args.n_thread)
    # Fetch the input from dataloader
    im0 = dataloader.next_element[0]
    sz0 = dataloader.next_element[3]

    # Build computation graph
    model = VOICEDModel(im0,
                        im0,
                        im0,
                        sz0,
                        None,
                        is_training=False,
                        occ_threshold=args.occ_threshold,
                        occ_ksize=args.occ_ksize,
Example #19
import utils
from dataloader import DataLoader
from model import Model
import tensorflow as tf
import os
import numpy as np

# set batch size
test_batch_size = 52
# create dataloader object. this will be used to get the testing examples
dataloader = DataLoader('dataset.h5', test_batch_size, train=False)

# create the model
model = Model(test_batch_size)

# create tensorflow session
sess = tf.InteractiveSession()

# create new saver to restore the model
saver = tf.train.Saver()

# if there is a checkpoint file, load it and populate the weights and biases.
if os.path.isfile('checkpoints/model.ckpt'):
    saver.restore(sess, 'checkpoints/model.ckpt')
    print("Model restored!")
else:
    # otherwise, set all parameters to a small random value
    sess.run(tf.initialize_all_variables())

# making sure read pointer is at zero
dataloader.reset_read_pointer()
Example #20
class DCGAN():
    def __init__(self,
                 dataset_name="celeba",
                 img_size=28,
                 channels=3,
                 backup_dir="backup"):

        self.dataset_name = dataset_name
        self.img_size = img_size
        self.channels = channels

        self.backup_dir = backup_dir
        self.time = time()

        # Input shape
        self.latent_dim = 100
        self.learning_rate = 1e-3

        self.gf = 64  # filter size of generator's last layer
        self.df = 64  # filter size of discriminator's first layer

        optimizer_disc = Adam(self.learning_rate / 10,
                              beta_1=0.5,
                              decay=0.00005)
        optimizer_gen = Adam(self.learning_rate, beta_1=0.5, decay=0.00005)

        # Configure data loader
        self.dl = DataLoader(dataset_name=self.dataset_name,
                             img_res=(self.img_size, self.img_size),
                             mem_load=True)
        self.n_data = self.dl.get_n_data()

        # Build generator
        self.generator = self.build_generator()
        print(
            "---------------------generator summary----------------------------"
        )
        self.generator.summary()

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        print(
            "\n---------------------discriminator summary----------------------------"
        )
        self.discriminator.summary()

        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer_disc,
                                   metrics=['accuracy'])

        z = Input(shape=(self.latent_dim, ))
        fake_img = self.generator(z)

        # for the combined model, we only train the generator
        self.discriminator.trainable = False

        validity = self.discriminator(fake_img)

        # Build combined model
        self.combined = Model(z, validity)
        print(
            "\n---------------------combined summary----------------------------"
        )
        self.combined.summary()
        self.combined.compile(loss=['binary_crossentropy'],
                              optimizer=optimizer_gen)

    def build_generator(self):
        noise = Input(shape=(self.latent_dim, ))

        def deconv2d(layer_input,
                     filters=256,
                     kernel_size=(5, 5),
                     strides=(2, 2),
                     bn_relu=True):
            """Layers used during upsampling"""
            u = Conv2DTranspose(filters,
                                kernel_size=kernel_size,
                                strides=strides,
                                padding='same',
                                use_bias=False)(layer_input)
            if bn_relu:
                u = BatchNormalization()(u)
                u = Activation("relu")(u)

            return u

        # Project the noise to a flat vector sized for a (img_size/4, img_size/4, 4*gf) feature map
        generator = Dense(4 * self.gf * (self.img_size // 4) * (self.img_size // 4),
                          use_bias=False)(noise)
        generator = BatchNormalization()(generator)
        generator = Activation('relu')(generator)
        generator = Reshape(
            (self.img_size // 4, self.img_size // 4, self.gf * 4))(generator)
        generator = deconv2d(generator, filters=self.gf * 2)
        generator = deconv2d(generator, filters=self.gf)
        generator = deconv2d(generator,
                             filters=self.channels,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             bn_relu=False)

        generator = Activation('tanh')(generator)

        return Model(noise, generator)

    def build_discriminator(self):
        def d_block(layer_input, filters, strides=1, bn=True):
            """Discriminator layer"""
            d = Conv2D(filters,
                       kernel_size=3,
                       strides=strides,
                       padding='same',
                       use_bias=False)(layer_input)
            if bn:
                d = Dropout(rate=0.8)(d)
            d = LeakyReLU(alpha=0.2)(d)

            return d

        # Input img = generated image
        d0 = Input(shape=(self.img_size, self.img_size, self.channels))

        d = d_block(d0, self.df, strides=2, bn=False)
        d = d_block(d, self.df * 2, strides=2)

        d = Flatten()(d)
        validity = Dense(1, activation='sigmoid')(d)

        return Model(d0, validity)

    def train(self, epochs, batch_size, sample_interval):
        def named_logs(model, logs):
            result = {}
            for l in zip(model.metrics_names, logs):
                result[l[0]] = l[1]
            return result

        start_time = datetime.datetime.now()

        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        max_iter = int(self.n_data / batch_size)
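        # max_iter: number of training iterations (batches) per epoch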
        os.makedirs(f"{self.backup_dir}/logs/{self.time}", exist_ok=True)
        tensorboard = TensorBoard(f"{self.backup_dir}/logs/{self.time}")
        tensorboard.set_model(self.generator)

        os.makedirs(f"{self.backup_dir}/models/{self.time}/", exist_ok=True)
        with open(
                f"{self.backup_dir}/models/{self.time}/generator_architecture.json",
                "w") as f:
            f.write(self.generator.to_json())
        print(
            f"\nbatch size : {batch_size} | num_data : {self.n_data} | max iteration : {max_iter} | time : {self.time} \n"
        )
        for epoch in range(1, epochs + 1):
            for iter in range(max_iter):
                # ----------------------------------
                #  Train Discriminator and Generator
                # ----------------------------------
                ref_imgs = self.dl.load_data(batch_size)

                noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
                gen_imgs = self.generator.predict(noise)
                make_trainable(self.discriminator, True)
                d_loss_real = self.discriminator.train_on_batch(
                    ref_imgs, valid * 0.9)  # label smoothing *0.9
                d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                make_trainable(self.discriminator, False)

                logs = self.combined.train_on_batch([noise], [valid])
                tensorboard.on_epoch_end(iter,
                                         named_logs(self.combined, [logs]))

                if iter % (sample_interval // 10) == 0:
                    elapsed_time = datetime.datetime.now() - start_time
                    print(
                        f"epoch:{epoch} | iter : {iter} / {max_iter} | time : {elapsed_time} | g_loss : {logs} | d_loss : {d_loss} "
                    )

                if (iter + 1) % sample_interval == 0:
                    self.sample_images(epoch, iter + 1)

            # save weights after every epoch
            self.generator.save_weights(
                f"{self.backup_dir}/models/{self.time}/generator_epoch{epoch}_weights.h5"
            )

    def sample_images(self, epoch, iter):
        os.makedirs(f'{self.backup_dir}/samples/{self.time}', exist_ok=True)
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_img = self.generator.predict(noise)

        # Rescale images 0 - 1
        gen_img = 0.5 * gen_img + 0.5

        # Save generated images and the high resolution originals
        fig, axs = plt.subplots(r, c)
        for row in range(r):
            for col in range(c):
                axs[row, col].imshow(gen_img[5 * row + col, :, :, :])
                axs[row, col].axis('off')
        fig.savefig(
            f"{self.backup_dir}/samples/{self.time}/e{epoch}-i{iter}.png",
            bbox_inches='tight',
            dpi=100)
        # plt.show() # only when running in ipython, otherwise halts the execution
        plt.close()
Example #21
# Placeholder and variables
# TODO : declare placeholder and variables

# Build model
# TODO : build your model here

# Loss and optimizer
# TODO : declare loss and optimizer operation
# Train and evaluate
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()


    if is_train_mode:
        train_dataloader = DataLoader(file_path='dataset/track_metadata.csv', batch_size=batch_size,
                                      label_column_name=label_column_name, is_training=True)

        for epoch in range(epoch_num):
            total_batch = train_dataloader.num_batch

            for i in range(total_batch):
                batch_x, batch_y = train_dataloader.next_batch()
                # TODO:  do some train step code here

        print('Training finished !')
        output_dir = checkpoint_path + '/run-%02d%02d-%02d%02d' % tuple(localtime(time()))[1:5]
        if not gfile.Exists(output_dir):
            gfile.MakeDirs(output_dir)
        saver.save(sess, output_dir)
        print('Model saved in file : %s'%output_dir)
    else:
Example #22
def main(args):
    device = Config.device
    print("PyTorch running with device {0}".format(device))

    if args.download:
        print("Downloading data")
        download_required_data()

    if args.lemmatize:
        caption_file = 'data/Flickr_Data/Flickr_TextData/Flickr8k.lemma.token.txt'
    else:
        caption_file = 'data/Flickr_Data/Flickr_TextData/Flickr8k.token.txt'

    print("Generating word2id")
    word2id = generate_word2id(caption_file)
    id2word = dict([(v, k) for k, v in word2id.items()])

    print("Loading Encoder and Decoder")
    encoder = Encoder(Config.encoded_size, Config.encoder_finetune)
    decoder = Decoder(Config.encoder_dim,
                      Config.decoder_dim,
                      Config.attention_dim,
                      Config.embed_dim,
                      vocab_size=len(word2id),
                      dropout=Config.dropout,
                      embedding_finetune=Config.embedding_finetune)

    if args.model_path:
        print("Loading model from model_path")
        load_model(encoder, decoder, args.model_path)
    else:
        # no model path, so load pretrained embedding
        print("Generating embedding from pretrained embedding file")
        embedding = load_pretrained_embedding(
            'data/glove.6B.{}d.txt'.format(Config.embed_dim), word2id,
            Config.embed_dim)
        decoder.load_embedding(embedding)

    if not args.test:
        # train
        print("Loading DataLoader and Trainer")
        dloader = DataLoader(caption_file, 'data/Flickr_Data/Images')
        trainer = Trainer(encoder, decoder, dloader)

        print("Start Training")
        loss_history = trainer.train(Config.num_epochs)
        plt.plot(np.arange(len(loss_history)), loss_history, label='Loss')
        plt.legend()
        plt.show()

    else:
        # test
        assert args.image_path

        encoder.eval()
        decoder.eval()

        transform = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor()
        ])

        image = transform(Image.open(args.image_path))
        image = image.unsqueeze(0)

        # TODO
        # generate caption from an image
        encoder_output = encoder(image)
        captions, alphas = decoder.generate_caption_greedily(encoder_output)

        caption_in_word = ' '.join(list(map(id2word.get, captions[1:])))
        plt.imshow(image[0].numpy().transpose(1, 2, 0))
        plt.title(caption_in_word)
        plt.axis('off')
        plt.show()

        print(caption_in_word)
Example #23
from __future__ import division

import numpy
import pandas
from pandas import *

import matplotlib
import matplotlib.pyplot as plt

matplotlib.style.use('ggplot')

from dataloader import DataLoader

loader = DataLoader()

data_key = loader.get_key_data()

ascii_num = 99
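# ASCII code point 99 is the letter 'c'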

raw_of_all = data_key[data_key['intent_code_point'] == ascii_num]
raw_of_alphabet = data_key[(data_key['intent_code_point'] == ascii_num) & (data_key['code_point'] != ascii_num)]

hands = ['Both', 'Right', 'Left']

result = []

for hand in hands:
    data_all = raw_of_all[raw_of_all['input_posture']==hand]
    data_all = data_all.groupby('keyboard_condition').count()['time']

    data_of_alphabet = raw_of_alphabet[raw_of_alphabet['input_posture']==hand]
Example #24
use_cuda = True

if use_cuda:
    cuda.empty_cache()

""" training mode"""
results = []
f = 3

model = CDKT()
if use_cuda:
    model = model.cuda()

optimizer = optim.Adam(model.parameters(),5*1e-4)
DL = DataLoader(read_data(f'/data/train.{f}.dat'),load_init())
for r in range(10):  # 10 passes over the training data
    i = 0
    for x,y in DL.samples(72):
        X = tensor(x)
        Y = tensor(y)
        if use_cuda:
            X = X.cuda()
            Y = Y.cuda()
        loss = model.forward(X,Y,True)
        
        optimizer.zero_grad()
        loss.backward()
        clip_grad_value_(model.parameters(), 10)
        optimizer.step()
        
Example #25
    if opt.method == 'policy-MPUR':
        policy_network_mpur = torch.load(model_path)['model']
        policy_network_mpur.stats = stats
        forward_model.policy_net = policy_network_mpur.policy_net
        forward_model.policy_net.stats = stats
        forward_model.policy_net.actor_critic = False

    forward_model.intype('gpu')
    forward_model.stats = stats
    if 'ten' in opt.mfile:
        forward_model.p_z = torch.load(
            path.join(opt.model_dir, f'{opt.mfile}.pz'))
    return forward_model, value_function, policy_network_il, policy_network_mper, stats


dataloader = DataLoader(None, opt, 'i80')
forward_model, value_function, policy_network_il, policy_network_mper, data_stats = load_models(
)
splits = torch.load(path.join(data_path, 'splits.pth'))

if opt.u_reg > 0.0:
    forward_model.train()
    forward_model.opt.u_hinge = opt.u_hinge
    if hasattr(forward_model, 'value_function'):
        forward_model.value_function.train()
    planning.estimate_uncertainty_stats(forward_model,
                                        dataloader,
                                        n_batches=50,
                                        npred=opt.npred)

gym.envs.registration.register(id='I-80-v1',
Example #26
def train(trn_im0_path,
          trn_im1_path,
          n_epoch=settings.N_EPOCH,
          n_batch=settings.N_BATCH,
          n_height=settings.N_HEIGHT,
          n_width=settings.N_WIDTH,
          n_channel=settings.N_CHANNEL,
          learning_rates=settings.LEARNING_RATES,
          learning_bounds=settings.LEARNING_BOUNDS,
          n_pyramid=settings.N_PYRAMID,
          max_disparity=settings.MAX_DISPARITY,
          w_ph=settings.W_PH,
          w_st=settings.W_ST,
          w_sm=settings.W_SM,
          w_bc=settings.W_BC,
          w_ar=settings.W_AR,
          n_checkpoint=settings.N_CHECKPOINT,
          n_summary=settings.N_SUMMARY,
          checkpoint_path=settings.CHECKPOINT_PATH,
          restore_path=settings.RESTORE_PATH,
          n_gpu=settings.N_GPU,
          n_thread=settings.N_THREAD):

    event_path = os.path.join(checkpoint_path, 'event')
    model_path = os.path.join(checkpoint_path, 'model.ckpt')
    log_path = os.path.join(checkpoint_path, 'results.txt')

    # Load image paths from paths file for training and validation
    trn_im0_paths = data_utils.read_paths(trn_im0_path)
    trn_im1_paths = data_utils.read_paths(trn_im1_path)
    n_trn_sample = len(trn_im0_paths)
    n_trn_step = n_epoch * np.ceil(n_trn_sample / n_batch).astype(np.int32)

    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        # Initialize optimizer
        boundaries = [np.int32(b * n_trn_step) for b in learning_bounds]
        learning_rate = tf.train.piecewise_constant(global_step, boundaries,
                                                    learning_rates)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        # Initialize dataloader
        dataloader = DataLoader(shape=[n_batch, n_height, n_width, n_channel],
                                name='dataloader',
                                n_thread=n_thread,
                                prefetch_size=8,
                                normalize=True,
                                random_flip=True,
                                random_gamma=True,
                                gamma_range=[0.8, 1.2],
                                random_brightness=True,
                                brightness_range=[0.5, 2.0],
                                random_color=True,
                                color_range=[0.8, 1.2])
        # Split data into towers for each GPU
        im0_split = tf.split(dataloader.next_element[0], n_gpu, 0)
        im1_split = tf.split(dataloader.next_element[1], n_gpu, 0)
        # Build computation graph
        tower_gradients = []
        tower_losses = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(n_gpu):
                with tf.device('/gpu:%d' % i):
                    params = []
                    model = MonoDispNet(im0_split[i],
                                        im1_split[i],
                                        n_pyramid=n_pyramid,
                                        w_ph=w_ph,
                                        w_st=w_st,
                                        w_sm=w_sm,
                                        w_bc=w_bc,
                                        w_ar=w_ar,
                                        max_disparity=max_disparity,
                                        reuse_variables=tf.AUTO_REUSE,
                                        model_index=i)
                    loss = model.total_loss
                    tower_losses.append(loss)
                    tower_gradients.append(optimizer.compute_gradients(loss))
        # Set up gradient computations
        avg_gradients = average_gradients(tower_gradients)
        gradients = optimizer.apply_gradients(avg_gradients,
                                              global_step=global_step)

        total_loss = tf.reduce_mean(tower_losses)

        tf.summary.scalar('learning_rate', learning_rate, ['model_0'])
        tf.summary.scalar('total_loss', total_loss, ['model_0'])
        trn_summary = tf.summary.merge_all('model_0')

        # Count trainable parameters
        n_parameter = 0
        for variable in tf.trainable_variables():
            n_parameter += np.array(variable.get_shape().as_list()).prod()
        # Log network parameters
        log('Network Parameters:', log_path)
        log(
            'n_batch=%d  n_height=%d  n_width=%d  n_channel=%d  ' %
            (n_batch, n_height, n_width, n_channel), log_path)
        log('n_pyramid=%d  max_disparity=%.3f' % (n_pyramid, max_disparity),
            log_path)
        log(
            'n_sample=%d  n_epoch=%d  n_step=%d  n_param=%d' %
            (n_trn_sample, n_epoch, n_trn_step, n_parameter), log_path)
        log(
            'learning_rates=[%s]' % ', '.join('{:.6f}'.format(r)
                                              for r in learning_rates),
            log_path)
        log(
            'boundaries=[%s]' %
            ', '.join('{:.2f}:{}'.format(l, b)
                      for l, b in zip(learning_bounds, boundaries)), log_path)
        log(
            'w_ph=%.3f  w_st=%.3f  w_sm=%.3f  w_bc=%.3f  w_ar=%.3f' %
            (w_ph, w_st, w_sm, w_bc, w_ar), log_path)
        log(
            'Restoring from: %s' %
            ('None' if restore_path == '' else restore_path), log_path)

        # Initialize Tensorflow session
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        session = tf.Session(config=config)
        # Initialize saver for storing and restoring checkpoints
        summary_writer = tf.summary.FileWriter(model_path, session.graph)
        train_saver = tf.train.Saver()
        # Initialize all variables
        session.run(tf.global_variables_initializer())
        session.run(tf.local_variables_initializer())
        # If given, load the weights from the restore path
        if restore_path != '':
            train_saver.restore(session, restore_path)

        # Begin training
        log('Begin training...', log_path)
        start_step = global_step.eval(session=session)
        time_start = time.time()
        trn_step = start_step
        while trn_step < n_trn_step:
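            # Shuffle the training image pairs and re-initialize the input pipeline for a new epoch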
            trn_im0_paths_epoch, trn_im1_paths_epoch = data_utils.shuffle_and_drop(
                [trn_im0_paths, trn_im1_paths], n_batch)
            dataloader.initialize(session,
                                  im0_paths=trn_im0_paths_epoch,
                                  im1_paths=trn_im1_paths_epoch,
                                  augment=True)
            while trn_step < n_trn_step:
                try:
                    _, loss_value = session.run([gradients, total_loss])
                    if trn_step % n_summary == 0:
                        summary = session.run(trn_summary)
                        summary_writer.add_summary(summary,
                                                   global_step=trn_step)
                    if trn_step and trn_step % n_checkpoint == 0:
                        time_elapse = (time.time() -
                                       time_start) / 3600 * trn_step / (
                                           trn_step - start_step + 1)
                        time_remain = (n_trn_step / trn_step -
                                       1.0) * time_elapse

                        checkpoint_log = 'batch {:>6}  loss: {:.5f}  time elapsed: {:.2f}h  time left: {:.2f}h'
                        log(
                            checkpoint_log.format(trn_step, loss_value,
                                                  time_elapse, time_remain),
                            log_path)
                        train_saver.save(session,
                                         model_path,
                                         global_step=trn_step)
                    trn_step += 1
                except tf.errors.OutOfRangeError:
                    break

        train_saver.save(session, model_path, global_step=n_trn_step)
Example #27
def train(args, net, optimizer, criterion, scheduler = None):
    log_file = open(args.save_root + file_name + "_training.log", "w", 1)
    log_file.write(args.exp_name+'\n')
    for arg in vars(args):
        print(arg, getattr(args, arg))
        log_file.write(str(arg)+': '+str(getattr(args, arg))+'\n')
    log_file.write(str(net))
    net.train()

    # loss counters
    batch_time = AverageMeter()
    losses = AverageMeter()
    loc_losses = AverageMeter()
    cls_losses = AverageMeter()

    print('Loading Dataset...')
    train_dataset = UCF24Detection(args.data_root, args.train_sets, SSDAugmentation(args.ssd_dim, args.means),
                                   AnnotationTransform(), input_type=args.input_type)
    val_dataset = UCF24Detection(args.data_root, 'test', BaseTransform(args.ssd_dim, args.means),
                                 AnnotationTransform(), input_type=args.input_type,
                                 full_test=False)
    epoch_size = len(train_dataset) // args.batch_size
    print ("epoch_size: ", epoch_size)
    print('Training SSD on', train_dataset.name)

    if args.visdom:

        import visdom
        viz = visdom.Visdom()
        viz.port = 8097
        viz.env = args.exp_name
        # initialize visdom loss plot
        lot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1, 6)).cpu(),
            opts=dict(
                xlabel='Iteration',
                ylabel='Loss',
                title='Current SSD Training Loss',
                legend=['REG', 'CLS', 'AVG', 'S-REG', ' S-CLS', ' S-AVG']
            )
        )
        # initialize visdom meanAP and class APs plot
        legends = ['meanAP']
        for cls in CLASSES:
            legends.append(cls)
        val_lot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1,args.num_classes)).cpu(),
            opts=dict(
                xlabel='Iteration',
                ylabel='Mean AP',
                title='Current SSD Validation mean AP',
                legend=legends
            )
        )


    batch_iterator = None
    train_data_loader = DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers,
                                  shuffle=False, collate_fn=detection_collate, pin_memory=True)
    val_data_loader = DataLoader(val_dataset, args.batch_size, num_workers=args.num_workers,
                                 shuffle=False, collate_fn=detection_collate, pin_memory=True)

    my_dict = copy.deepcopy(train_data_loader.dataset.train_vid_frame)
    keys = list(my_dict.keys())
    k_len = len(keys)
    arr = np.arange(k_len)

    xxx = copy.deepcopy(train_data_loader.dataset.ids)

    itr_count = 0
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    # for iteration in range(args.max_iter + 1):
    current_epoch = 0
    iteration = 0
    while current_epoch < (args.total_epoch + 1):

        if (not batch_iterator) or (iteration % epoch_size == 0):
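            # Reshuffle the training ids at the video level so frames from the same video stay contiguous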
            xxxx = copy.deepcopy(train_data_loader.dataset.ids)
            np.random.shuffle(arr)
            iii = 0
            for arr_i in arr:
                key = keys[arr_i]
                rang = my_dict[key]
                xxxx[iii:(iii + rang[1] - rang[0])] = xxx[rang[0]:rang[1]]
                iii += rang[1] - rang[0]

            train_data_loader.dataset.ids = copy.deepcopy(xxxx)
            # create batch iterator
            batch_iterator = iter(train_data_loader)
            if scheduler is not None and iteration > 0:
                scheduler.step()
            current_epoch += 1

        iteration += 1
        # load train data
        images, targets, img_indexs = next(batch_iterator)
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda(), volatile=True) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno, volatile=True) for anno in targets]
        # forward
        out = net(images, img_indexs)
        # backprop
        optimizer.zero_grad()

        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c
        loss.backward()
        torch.nn.utils.clip_grad_norm(net.parameters(), args.clip)

        optimizer.step()

        loc_loss = loss_l.data[0]
        conf_loss = loss_c.data[0]
        # print('Loss data type ',type(loc_loss))
        loc_losses.update(loc_loss)
        cls_losses.update(conf_loss)
        losses.update((loc_loss + conf_loss)/2.0)

        if iteration % args.print_step == 0 and iteration > 0:
            if args.visdom:
                losses_list = [loc_losses.val, cls_losses.val, losses.val, loc_losses.avg, cls_losses.avg, losses.avg]
                viz.line(X=torch.ones((1, 6)).cpu() * iteration,
                    Y=torch.from_numpy(np.asarray(losses_list)).unsqueeze(0).cpu(),
                    win=lot,
                    update='append')

            torch.cuda.synchronize()
            t1 = time.perf_counter()
            batch_time.update(t1 - t0)

            print_line = 'Epoch {:02d}/{:02d} Iteration {:06d}/{:06d} loc-loss {:.3f}({:.3f}) cls-loss {:.3f}({:.3f}) ' \
                         'average-loss {:.3f}({:.3f}) Timer {:0.3f}({:0.3f}) lr {:0.5f}'.format(
                          current_epoch, args.total_epoch, iteration, args.max_iter, loc_losses.val, loc_losses.avg, cls_losses.val,
                          cls_losses.avg, losses.val, losses.avg, batch_time.val, batch_time.avg, args.lr)

            torch.cuda.synchronize()
            t0 = time.perf_counter()
            log_file.write(print_line+'\n')
            print(print_line)

            # if args.visdom and args.send_images_to_visdom:
            #     random_batch_index = np.random.randint(images.size(0))
            #     viz.image(images.data[random_batch_index].cpu().numpy())
            itr_count += 1

            if itr_count % args.loss_reset_step == 0 and itr_count > 0:
                loc_losses.reset()
                cls_losses.reset()
                losses.reset()
                batch_time.reset()
                print('Reset accumulators of ', args.exp_name,' at', itr_count*args.print_step)
                itr_count = 0

        if (iteration % args.eval_step == 0 or iteration == 5000) and iteration > 0:
            torch.cuda.synchronize()
            tvs = time.perf_counter()
            print('Saving state, iter:', iteration)
            torch.save(net.state_dict(), args.save_root + file_name + '_ssd300_ucf24_' +
                       repr(iteration) + '.pth')

            net.eval() # switch net to evaluation mode
            mAP, ap_all, ap_strs = validate(args, net, val_data_loader, val_dataset, iteration, iou_thresh=args.iou_thresh)

            for ap_str in ap_strs:
                print(ap_str)
                log_file.write(ap_str+'\n')
            ptr_str = '\nMEANAP:::=>'+str(mAP)+'\n'
            print(ptr_str)
            log_file.write(ptr_str)

            if args.visdom:
                aps = [mAP]
                for ap in ap_all:
                    aps.append(ap)
                viz.line(
                    X=torch.ones((1, args.num_classes)).cpu() * iteration,
                    Y=torch.from_numpy(np.asarray(aps)).unsqueeze(0).cpu(),
                    win=val_lot,
                    update='append'
                        )
            net.train() # Switch net back to training mode
            torch.cuda.synchronize()
            t0 = time.perf_counter()
            prt_str = '\nValidation TIME::: {:0.3f}\n\n'.format(t0-tvs)
            print(prt_str)
            log_file.write(prt_str)

    log_file.close()
Esempio n. 28
0
def main():
    args = parse_args()

    dataset = args.dataset
    model_name = args.model

    args.seed = random.randint(0, 1000000)

    output_folder = None
    if model_name == "adv":
        output_folder = conf.output_folder_bgnn_adv + "/" + str(dataset)
    elif model_name == "mlp":
        output_folder = conf.output_folder_bgnn_mlp + "/" + str(dataset)

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    seed_file = output_folder + "/random_seed.txt"
    fs = open(seed_file, 'w')
    wstr = str(args.seed)
    fs.write(wstr + "\n")
    fs.close()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")

    torch.autograd.set_detect_anomaly(True)

    # load the bipartite graph data
    dataloader = DataLoader(args.dataset, link_pred=True, rec=False)
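    # Attribute dimensions and node-set sizes are taken from the loaded
    # bipartite graph; the remaining hyper-parameters come from the CLI args.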
    model_config = {
        "attr_one_dim": dataloader.set_one.shape[1],
        "attr_two_dim": dataloader.set_two.shape[1],
        "set_one_size": dataloader.set_one.shape[0],
        "set_two_size": dataloader.set_two.shape[0],
        "epoch": args.epochs,
        "layer_depth": args.layer_depth,
        "device": device,
        "disc_hid_dim": args.dis_hidden,
        "learning_rate": args.lr,
        "weight_decay": args.weight_decay,
        "dropout": args.dropout,
        "batch_size": args.batch_size
    }

    if args.model == 'adv':
        # start training
        bgnn = BGNNAdversarial(dataloader, model_config)
        bgnn.adversarial_learning()
    elif args.model == 'mlp':
        bgnn = BGNNMLP()
        bgnn.relation_learning()

    with open('./output_model_tuning_link_prediction.txt', 'a') as f:
        f.write(
            "epoch: {}, layer_depth: {}, disc_hid_dim: {}, "
            "learning_rate: {}, weight_decay: {}, dropout: {}, batch_size: {}\n"
            .format(model_config['epoch'], model_config['layer_depth'],
                    model_config['disc_hid_dim'],
                    model_config['learning_rate'],
                    model_config['weight_decay'], model_config['dropout'],
                    model_config['batch_size']))
Esempio n. 29
0
    def get_name(self, name):
        """Build the list of per-frame metric names (pandas column labels).

        Args:
            name: a string prefix for the metric names.

        Returns:
            A list of strings of the form '<name>_<frame>', one entry per
            frame in self._frame_list.
        """
        return [name + '_' + str(frame) for frame in self._frame_list]


if __name__ == '__main__':
    from config import args
    origin = DataLoader()
    input_base = args.origin_input_base
    path = os.path.join(input_base, 'test.txt')
    actor = args.actor
    origin.variable_operate(input_base, path, actor)
    origin_x_y = origin.variable_get_data_with_window(2, each_len=2, stride=12)

    pred = DataLoader()
    input_base = args.result_input_base
    path = os.path.join(input_base, 'data_out.txt')
    pred.operate(input_base, path, actor)
    pred_x_y = pred.get_data_with_window(4, 2)
    pred_info = pred.get_data()

    frame_list = [4, 10]
    eva = Evaluation(frame_list)
Esempio n. 30
0
opt.model_file += f'-lrtz={opt.lrt_z}'
opt.model_file += f'-updatez={opt.z_updates}'
opt.model_file += f'-inferz={opt.infer_z}'
opt.model_file += f'-learnedcost={opt.learned_cost}'
opt.model_file += f'-seed={opt.seed}'

if opt.value_model == '':
    opt.model_file += '-novalue'

if opt.learned_cost == 1:
    model.cost = torch.load(path.join(opt.model_dir,
                                      opt.mfile + '.cost.model'))['model']

print(f'[will save as: {opt.model_file}]')

dataloader = DataLoader(None, opt, opt.dataset)
model.train()
model.opt.u_hinge = opt.u_hinge
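# Estimate the model's uncertainty statistics over a sample of batches before
# switching it to eval mode.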
planning.estimate_uncertainty_stats(model,
                                    dataloader,
                                    n_batches=50,
                                    npred=opt.npred)
model.eval()


def train(nbatches, npred):
    model.train()
    model.policy_net.train()
    total_loss_c, total_loss_u, total_loss_l, total_loss_a, n_updates, grad_norm = 0, 0, 0, 0, 0, 0
    total_loss_policy = 0
    for j in range(nbatches):
Esempio n. 31
0
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)

opt.save_video = (opt.save_video == 1)
opt.eval_dir = opt.model_dir + 'eval/'


print(f'[loading {opt.model_dir + opt.mfile}]')
model = torch.load(opt.model_dir + opt.mfile)
if isinstance(model, dict):
    model = model['model']
model = model.cuda()
model.eval()
# if opt.cuda == 1:
    # model.intype('gpu')

dataloader = DataLoader(None, opt, opt.dataset)
# model.opt.npred = opt.npred  # instruct the model about how many predictions we want it to produce
model.opt.alpha = 0

dirname = f'{opt.eval_dir}{opt.mfile}-nbatches={opt.n_batches}-npred={opt.npred}-nsample={opt.n_samples}'
if '-ten' in opt.mfile:
    dirname += f'-sampling={opt.sampling}'
    if opt.sampling == 'knn':
        dirname += f'-density={opt.graph_density}'
    elif opt.sampling == 'pdf':
        dirname += f'-nmixture={opt.n_mixture}'
        mfile_prior = f'{opt.model_dir}/{opt.mfile}-nfeature=128-lrt=0.0001-nmixture={opt.n_mixture}.prior'
        print(f'[loading prior model: {mfile_prior}]')
        model.prior = torch.load(mfile_prior).cuda()
    # load z vectors. Extract them if they are not already saved.
    pzfile = opt.model_dir + opt.mfile + '.pz'
Esempio n. 32
0
        captions_dict = load_captions(train_dir)
        vocab = Vocabulary(captions_dict, threshold)
        os.mkdir(model_dir)
        print("Directory '{model_name}' created to dump vocab.pkl.".format(
            model_name=model_name))
        with open(os.path.join(model_dir, 'vocab.pkl'), 'wb') as f:
            pickle.dump(vocab, f)
            print('Dictionary Dumped !')

    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataloader = DataLoader(train_dir, vocab, transform)
    data = dataloader.gen_data()
    print(train_dir + ' loaded !')

    vocab_size = vocab.index

    cnn = get_CNN(architecture=model_name, embedding_dim=args.embedding_dim)
    lstm = RNN(embedding_dim=args.embedding_dim,
               hidden_dim=args.hidden_dim,
               vocab_size=vocab_size)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    cnn.to(device)
    lstm.to(device)
Esempio n. 33
0
import pandas as pd
from dataloader import DataLoader

# full_df = DataLoader().load_pickle('full')

base_path = './Data/data_1-2019/'

# df_concat.append(DataLoader().load_adv(base_path + 'carat_1_2019.csv'))
# df_concat.append(DataLoader().load_adv(base_path + 'amnet_1_2019.csv'))
# df_concat.append(DataLoader().load_adv(base_path + 'deepblue_1_2019.csv'))
# df_concat.append(DataLoader().load_adv(base_path + 'dentsux_1_2019.csv'))
# df_concat.append(DataLoader().load_adv(base_path + 'iprospect_1_2019.csv'))
# df_concat.append(DataLoader().load_adv(base_path + 'vizeum_1_2019.csv'))

carat_df = DataLoader().load_adv(base_path + 'carat_1_2019.csv')
amnet_df = DataLoader().load_adv(base_path + 'amnet_1_2019.csv')
deepblue_df = DataLoader().load_adv(base_path + 'deepblue_1_2019.csv')
dentsux_df = DataLoader().load_adv(base_path + 'dentsux_1_2019.csv')
iprospect_df = DataLoader().load_adv(base_path + 'iprospect_1_2019.csv')
vizeum_df = DataLoader().load_adv(base_path + 'vizeum_1_2019.csv')

#Add columns CARAT
carat_df.insert(0, 'Macro DB', 'Advertmind')
carat_df.insert(1, 'DB', 'CARAT')
carat_df.insert(2, 'Macro Agencia', 'CARAT')
carat_df.insert(3, 'Agencia', 'CARAT')
#Add columns DEEPBLUE
deepblue_df.insert(0, 'Macro DB', 'Advertmind')
deepblue_df.insert(1, 'DB', 'DEEPBLUE')
deepblue_df.insert(2, 'Macro Agencia', 'CARAT')
Esempio n. 34
0
### TORCH SETUP ###
print('-> Using PyTorch', th.__version__)
th.manual_seed(args.seed)
if args.gpu: th.cuda.manual_seed(args.seed)
X_TENSOR = ''
if not args.gpu:
    X_TENSOR = 'torch.DoubleTensor' if args.double else 'torch.FloatTensor'
else:
    X_TENSOR = 'torch.cuda.DoubleTensor' if args.double else 'torch.cuda.FloatTensor'
    #th.set_default_tensor_type('torch.cuda.HalfTensor') # Bad Stability
th.set_default_tensor_type(X_TENSOR)
th.backends.cudnn.benchmark = True  # enable cuDNN auto-tuner

### DataLoader ###
dataloader = DataLoader()
#dataloader.satellite(64, 'trainval', 100)

### Model ###
class Model(th.nn.Module):
    ''' Reference: caffe/examples/cifar10 # 70%
    https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10.py
    '''
    def __init__(self):
        super(Model, self).__init__()
        self.SEQ1 = th.nn.Sequential(OrderedDict([

          ('conv1', th.nn.Conv2d(3, 32, 5, stride=1, padding=2)),
          ('bn1',   th.nn.BatchNorm2d(32)),
          ('relu1', th.nn.ReLU()),
          ('pool1', th.nn.MaxPool2d(3, stride=2, padding=1)),
Esempio n. 35
0
CLASS = 10
W_LEN = 200
CUDA = 1
CRADLE_SIZE = 50
REPRO_SIZE = 20
UNION = 10

weights = np.load(save_npy_name)
weights = torch.from_numpy(weights).cuda()[:W_LEN]
weights2 = np.load(save_npy_name2)
weights2 = torch.from_numpy(weights2).cuda()[:W_LEN]
weights3 = np.load(save_npy_name3)
weights3 = torch.from_numpy(weights3).cuda()[:W_LEN]

dl = DataLoader(True, CUDA)
images, labels = dl.get_all()
dl_test = DataLoader(False, CUDA)
images_t, labels_t = dl_test.get_all()

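# Pass the train and test images through the three loaded weight matrices in
# sequence; each call appears to apply one layer's weights to the previous output.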
images = get_images_output(weights, images)
images_t = get_images_output(weights, images_t)
images = get_images_output(weights2, images)
images_t = get_images_output(weights2, images_t)
images = get_images_output(weights3, images)
images_t = get_images_output(weights3, images_t)

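# Remap zero entries to -1 so the features take values in {-1, +1}
# (assuming the layer outputs are binary).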
images[images == 0] = -1
images_t[images_t == 0] = -1

images = images.repeat(REPRO_SIZE, 1, 1)
Esempio n. 36
0
    def __init__(self):
        DataLoader.__init__(self)
        self.read_train()
        self.read_test()
Esempio n. 37
0
def main():
    # Step 0: preparation
    writer = LogWriter(logdir="./log/scalar")
    place = paddle.fluid.CUDAPlace(0)
    with fluid.dygraph.guard(place):
        # Step 1: Define training dataloader
        image_folder = ""
        image_list_file = "dummy_data/fabric_list.txt"
        transform = Transform()  #Normalize2()  # [0,255]-->[0,1]
        x_data = DataLoader(image_folder, image_list_file, transform=transform)
        x_dataloader = fluid.io.DataLoader.from_generator(capacity=2,
                                                          return_list=True)
        x_dataloader.set_sample_generator(x_data, args.batch_size)

        total_batch = len(x_data) // args.batch_size

        # Step 2: Create model
        if args.net == "basic":
            D = Discriminator()
            G = Generator()
            E = Invertor()
        else:
            raise NotImplementedError(
                f"args.net: {args.net} is not Supported!")

        # Step 3: Define criterion and optimizer
        criterion = Basic_Loss
        D_optim = AdamOptimizer(learning_rate=args.lr,
                                parameter_list=D.parameters())
        G_optim = AdamOptimizer(learning_rate=args.lr,
                                parameter_list=G.parameters())
        E_optim = AdamOptimizer(learning_rate=args.lr,
                                parameter_list=E.parameters())

        G_loss_meter = AverageMeter()
        D_loss_meter = AverageMeter()
        E_loss_meter = AverageMeter()

        D.train()
        G.train()
        E.train()

        # Step 4: Slight Training
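        # Warm-up: train D and G at the base learning rate until the
        # discriminator reliably scores real samples near 1 (see the 0.98
        # threshold below), then switch to full training with lr * 10.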
        iteration = -1
        is_slight_Train = True
        for epoch in range(1, args.epoch_num + 1):
            # alternate Discriminator and Generator updates on each batch
            for (x, x_labels) in x_dataloader():
                n = x.shape[0]
                if is_slight_Train:
                    iteration += 1
                    x = fluid.layers.cast(x, dtype="float32")
                    x = fluid.layers.transpose(x, perm=[0, 3, 1, 2])
                    preds_x = D(x)
                    preds_x_array = preds_x.numpy()
                    #print("D(x),1",preds_array.shape, np.mean(preds_array))
                    writer.add_scalar(tag="D(x)=1",
                                      step=iteration,
                                      value=np.mean(preds_x_array))
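                    # Leave the warm-up phase once the discriminator's mean
                    # score on real samples reaches 0.98.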
                    if np.mean(preds_x_array) >= 0.98:
                        is_slight_Train = False

                    z = np.random.rand(n, 64)
                    zeros = np.zeros((n, 1))
                    z = to_variable(z)
                    zeros = to_variable(zeros)
                    z = fluid.layers.cast(z, dtype="float32")
                    zeros = fluid.layers.cast(zeros, dtype="int64")
                    preds_fx = D(G(z))
                    preds_fx_array = preds_fx.numpy()
                    writer.add_scalar(tag="D(G(z))=0",
                                      step=iteration,
                                      value=np.mean(preds_fx_array))
                    D_loss = criterion(preds_x, x_labels) + criterion(
                        preds_fx, zeros)
                    D_loss.backward()
                    D_optim.minimize(D_loss)
                    D.clear_gradients()
                    D_loss_meter.update(D_loss.numpy()[0], n)
                    writer.add_scalar(tag="D_loss",
                                      step=iteration,
                                      value=D_loss_meter.avg)
                    print(f"EPOCH[{epoch:03d}/{args.epoch_num:03d}], " +
                          f"STEP{iteration}, " +
                          f"Average D Loss: {D_loss_meter.avg:4f}, ")

                    z = np.random.rand(n, 64)
                    ones = np.ones((n, 1))
                    z = to_variable(z)
                    ones = to_variable(ones)
                    z = fluid.layers.cast(z, dtype="float32")
                    ones = fluid.layers.cast(ones, dtype="int64")
                    preds = D(G(z))
                    preds_array = preds.numpy()
                    writer.add_scalar(tag="D(G(z))=1",
                                      step=iteration,
                                      value=np.mean(preds_array))
                    G_loss = criterion(preds, ones)
                    G_loss.backward()
                    G_optim.minimize(G_loss)
                    G.clear_gradients()
                    G_loss_meter.update(G_loss.numpy()[0], n)
                    writer.add_scalar(tag="G_loss",
                                      step=iteration,
                                      value=G_loss_meter.avg)
                    print(f"EPOCH[{epoch:03d}/{args.epoch_num:03d}], " +
                          f"STEP{iteration}, " +
                          f"Average G Loss: {G_loss_meter.avg:4f}")

            if epoch % args.save_freq == 0 or epoch == args.epoch_num or not is_slight_Train:
                D_model_path = os.path.join(args.checkpoint_folder,
                                            f"D_{args.net}-Epoch-{epoch}")
                G_model_path = os.path.join(args.checkpoint_folder,
                                            f"G_{args.net}-Epoch-{epoch}")

                # save model and optimizer states
                model_dict = D.state_dict()
                fluid.save_dygraph(model_dict, D_model_path)
                optim_dict = D_optim.state_dict()
                fluid.save_dygraph(optim_dict, D_model_path)

                model_dict = G.state_dict()
                fluid.save_dygraph(model_dict, G_model_path)
                optim_dict = G_optim.state_dict()
                fluid.save_dygraph(optim_dict, G_model_path)

                print(
                    f'----- Save model: {D_model_path}.pdparams, {G_model_path}.pdparams'
                )
                if not is_slight_Train:
                    break

        # Step 5:  full training for Generator and Discriminator
        D_optim = AdamOptimizer(learning_rate=args.lr * 10,
                                parameter_list=D.parameters())
        G_optim = AdamOptimizer(learning_rate=args.lr * 10,
                                parameter_list=G.parameters())
        G_loss_meter = AverageMeter()
        D_loss_meter = AverageMeter()

        for epoch in range(1, args.epoch_num + 1):
            for (x, x_labels) in x_dataloader():
                n = x.shape[0]
                iteration += 1
                x = fluid.layers.cast(x, dtype="float32")
                x = fluid.layers.transpose(x, perm=[0, 3, 1, 2])
                preds1 = D(x)
                preds_array = preds1.numpy()
                writer.add_scalar(tag="D(x)=1",
                                  step=iteration,
                                  value=np.mean(preds_array))
                z = np.random.rand(n, 64)
                zeros = np.zeros((n, 1))
                z = to_variable(z)
                zeros = to_variable(zeros)
                z = fluid.layers.cast(z, dtype="float32")
                zeros = fluid.layers.cast(zeros, dtype="int64")
                preds2 = D(G(z))
                preds_array = preds2.numpy()
                #print("DG(z),0:",preds_array.shape, np.mean(preds_array))
                writer.add_scalar(tag="D(G(z))=0",
                                  step=iteration,
                                  value=np.mean(preds_array))
                D_loss = criterion(preds1, x_labels) + criterion(preds2, zeros)
                D_loss.backward()
                D_optim.minimize(D_loss)
                D.clear_gradients()
                D_loss_meter.update(D_loss.numpy()[0], n)
                writer.add_scalar(tag="D_loss",
                                  step=iteration,
                                  value=D_loss_meter.avg)
                print(f"EPOCH[{epoch:03d}/{args.epoch_num:03d}], " +
                      f"STEP{iteration}, " +
                      f"Average D Loss: {D_loss_meter.avg:4f} ")
                z = np.random.rand(n, 64)
                ones = np.ones((n, 1))
                z = to_variable(z)
                ones = to_variable(ones)
                z = fluid.layers.cast(z, dtype="float32")
                ones = fluid.layers.cast(ones, dtype="int64")
                preds = D(G(z))
                preds_array = preds.numpy()
                #print("DG(z),1:",preds_array.shape, np.mean(preds_array))
                writer.add_scalar(tag="D(G(z))=1",
                                  step=iteration,
                                  value=np.mean(preds_array))
                G_loss = criterion(preds, ones)
                G_loss.backward()
                G_optim.minimize(G_loss)
                G.clear_gradients()
                G_loss_meter.update(G_loss.numpy()[0], n)
                writer.add_scalar(tag="G_loss",
                                  step=iteration,
                                  value=G_loss_meter.avg)
                print(f"EPOCH[{epoch:03d}/{args.epoch_num:03d}], " +
                      f"STEP{iteration}, " +
                      f"Average G Loss: {G_loss_meter.avg:4f}")

            if epoch % args.save_freq == 0 or epoch == args.epoch_num:
                D_model_path = os.path.join(args.checkpoint_folder,
                                            f"D_{args.net}-Epoch-{epoch}")
                G_model_path = os.path.join(args.checkpoint_folder,
                                            f"G_{args.net}-Epoch-{epoch}")

                # save model and optimizer states
                model_dict = D.state_dict()
                fluid.save_dygraph(model_dict, D_model_path)
                optim_dict = D_optim.state_dict()
                fluid.save_dygraph(optim_dict, D_model_path)

                model_dict = G.state_dict()
                fluid.save_dygraph(model_dict, G_model_path)
                optim_dict = G_optim.state_dict()
                fluid.save_dygraph(optim_dict, G_model_path)
                print(
                    f'----- Save model: {D_model_path}.pdparams, {G_model_path}.pdparams'
                )

        # Step 6: full training for Inverter
        E_optim = AdamOptimizer(learning_rate=args.lr * 10,
                                parameter_list=E.parameters())
        E_loss_meter = AverageMeter()

        for epoch in range(1, args.epoch_num + 1):
            for (x, x_labels) in x_dataloader():
                n = x.shape[0]
                iteration += 1
                x = fluid.layers.cast(x, dtype="float32")
                image = x.numpy()[0] * 255
                writer.add_image(tag="x", step=iteration, img=image)
                x = fluid.layers.transpose(x, perm=[0, 3, 1, 2])
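                # Reconstruct x by encoding with E and decoding with G; the MSE
                # between x and its reconstruction is the inverter's loss.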
                invert_x = G(E(x))
                invert_image = fluid.layers.transpose(invert_x,
                                                      perm=[0, 2, 3, 1])
                invert_image = invert_image.numpy()[0] * 255
                #print("D(x),1",preds_array.shape, np.mean(preds_array))
                writer.add_image(tag="invert_x",
                                 step=iteration,
                                 img=invert_image)
                print(np.max(invert_image), np.min(invert_image))
                E_loss = fluid.layers.mse_loss(invert_x, x)
                print("E_loss shape:", E_loss.numpy().shape)
                E_loss.backward()
                E_optim.minimize(E_loss)
                E.clear_gradients()
                E_loss_meter.update(E_loss.numpy()[0], n)
                writer.add_scalar(tag="E_loss",
                                  step=iteration,
                                  value=E_loss_meter.avg)
                print(f"EPOCH[{epoch:03d}/{args.epoch_num:03d}], " +
                      f"STEP{iteration}, " +
                      f"Average E Loss: {E_loss_meter.avg:4f}, ")

            if epoch % args.save_freq == 0 or epoch == args.epoch_num:
                E_model_path = os.path.join(args.checkpoint_folder,
                                            f"E_{args.net}-Epoch-{epoch}")
                # save model and optimizer states
                model_dict = E.state_dict()
                fluid.save_dygraph(model_dict, E_model_path)
                optim_dict = E_optim.state_dict()
                fluid.save_dygraph(optim_dict, E_model_path)
                print(
                    f'----- Save model: {E_model_path}.pdparams'
                )