Example no. 1
def train(parameters):
    model_folder = setup_log(parameters, 'train')

    set_seed(parameters['seed'])

    ###################################
    # Data Loading
    ###################################
    print('Loading training data ...')
    train_loader = DataLoader(parameters['train_data'], parameters)
    train_loader(embeds=parameters['embeds'])
    train_data = DocRelationDataset(train_loader, 'train', parameters, train_loader).__call__()

    print('\nLoading testing data ...')
    test_loader = DataLoader(parameters['test_data'], parameters)
    test_loader()
    test_data = DocRelationDataset(test_loader, 'test', parameters, train_loader).__call__()

    ###################################
    # Training
    ###################################
    trainer = Trainer(train_loader, parameters, {'train': train_data, 'test': test_data}, model_folder)
    trainer.run()

    if parameters['plot']:
        plot_learning_curve(trainer, model_folder)

    if parameters['save_model']:
        save_model(model_folder, trainer, train_loader)
Example no. 2
    def test(self):
        test_loader = DataLoader(
            os.path.join(self.config['global']['folders']['datasets'],
                         self.config['global']['files']['datasets']['test']))
        self.model.evaluate_generator(
            generator=test_loader.flow(batch=self.batch),
            val_samples=test_loader.size)
Example no. 3
def run():
    '''
    Main method of the package.
    '''
    # ------------- LOAD DATA -------------- #
    loader = DataLoader()
    training_set, test_set = loader.leave_one_out(test_index=0)

    # --------------- TRAINING ---------------- #
    trainlandmarks = training_set[1]
    # train a Feature Detection system
    featuredetector = FDTraining()
    # fully automatic:
    featuredetector.search_region = featuredetector.scan_region(trainlandmarks,
                                                                diff=55,
                                                                searchStep=20)
    # semi-automatic:
    # featuredetector.search_region = ((880, 1125), (1350, 1670), 20)
    print('---Search space set to', featuredetector.search_region)
    print('Done.')

    # build and train an Active Shape Model
    asm = ASMTraining(training_set, k=3, levels=3)

    # --------------- TESTING ----------------- #
    testimage, testlandmarks = test_set
    # remove some noise from the test image
    testimage = remove_noise(testimage)

    # perform feature matching to find init regions
    print('---Searching for matches...')
    matches = featuredetector.match(testimage)
    print('Done.')

    # or perform manual initialisation (click on center)
    matches = [featuredetector._ellipse(plot.set_clicked_center(testimage))]

    for i in range(len(matches)):
        # search and fit image
        new_fit = asm.activeshape.multiresolution_search(testimage,
                                                         matches[i],
                                                         t=10,
                                                         max_level=2,
                                                         max_iter=10,
                                                         n=0.2)
        # Find the target that the new fit represents in order
        # to compute the error. This is done by taking the smallest
        # MSE of all targets.
        mse = np.zeros((testlandmarks.shape[0], 1))
        for j in range(mse.shape[0]):
            mse[j] = mean_squared_error(testlandmarks[j], new_fit)
        best_fit_index = np.argmin(mse)
        # implement maximally tolerable error
        if int(mse[best_fit_index]) < MSE_THRESHOLD:
            print('MSE:', mse[best_fit_index])
            plot.render_shape_to_image(testimage,
                                       testlandmarks[best_fit_index],
                                       color=(0, 0, 0))
        else:
            print('Bad fit. Needs to restart.')
Example no. 4
def train(parameters):
    model_folder = setup_log(parameters, 'train')

    set_seed(0)

    ###################################
    # Data Loading
    ###################################
    print('\nLoading training data ...')
    train_loader = DataLoader(parameters['train_data'], parameters)
    train_loader(embeds=parameters['embeds'])
    train_data = RelationDataset(train_loader, 'train',
                                 parameters['unk_w_prob'],
                                 train_loader).__call__()

    print('\nLoading testing data ...')
    test_loader = DataLoader(parameters['test_data'], parameters)
    test_loader()
    test_data = RelationDataset(test_loader, 'test', parameters['unk_w_prob'],
                                train_loader).__call__()

    ###################################
    # TRAINING
    ###################################
    trainer = Trainer({
        'train': train_data,
        'test': test_data
    }, parameters, train_loader, model_folder)
    trainer.run()

    trainer.eval_epoch(final=True, save_predictions=True)
    if parameters['plot']:
        plot_learning_curve(trainer, model_folder)
Example no. 5
    def __init__(self):
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.n_features = 128
        self.n_classes = 31
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        self.data_loader = DataLoader(img_res=(self.img_rows, self.img_cols),
                                      n_classes=self.n_classes)

        optimizer = Adam(0.0002, 0.5)

        self.D_R = build_discriminator(self.img_shape)
        self.D_F = build_feature_discriminator(self.n_features)
        self.D_R.compile(loss='binary_crossentropy',
                         optimizer=optimizer,
                         metrics=['accuracy'])
        self.D_F.compile(loss='binary_crossentropy',
                         optimizer=optimizer,
                         metrics=['accuracy'])

        self.Refiner = build_refiner(self.img_shape, self.channels)
        self.Feature = build_encoder(self.img_shape, self.n_features)
        self.Classifier = build_classifier(self.n_features, self.n_classes)

        self.D_R.trainable = False
        self.D_F.trainable = False

        self.Classifier.compile(loss='categorical_crossentropy',
                                optimizer=optimizer,
                                metrics=['accuracy'])
        self.Classifier.trainable = False

        self.GAN_1 = Sequential()
        self.GAN_1.add(self.Refiner)
        self.GAN_1.add(self.D_R)
        self.GAN_1.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])

        self.GAN_2 = Sequential()
        self.GAN_2.add(self.Refiner)
        self.GAN_2.add(self.Feature)
        self.GAN_2.add(self.D_F)
        self.GAN_2.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])

        self.GAN_3 = Sequential()
        self.GAN_3.add(self.Refiner)
        self.GAN_3.add(self.Feature)
        self.GAN_3.add(self.Classifier)
        self.GAN_3.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
Example no. 6
def weak_label(train_primitive_matrix):
	# Load data
	dl = DataLoader()
	# train_primitive_matrix, val_primitive_matrix, test_primitive_matrix, \
	# train_ground, val_ground, test_ground, mode, frameNums = dl.load_data(mode = 'auto', numFramesToLoad = 1000)
	_, val_primitive_matrix, _, \
	_, val_ground, _, mode, frameNums = dl.load_data(mode = 'auto', numFramesToLoad = 1000, need_split = False)
	# Pass into reef
	return_matrix = reef_label(train_primitive_matrix, val_primitive_matrix, val_ground, None)
	return(return_matrix)
Example no. 7
def main(params):
    # Arguments passed down from the parser
    download_data_path = params['input_data_path']
    data_basepath = params['output_data_path']
    logs_path = params['logs_path']
    plots_path = params['plots_path']
    contour_type = params['contour_type']
    toggle_plot = params['toggle_plot']
    mini_batch_size = params['mini_batch_size']

    # Set up logging
    _setup_logging(logs_path)

    # Meat of the python program
    logging.info(
        'Started running preprocessor for the following parameters: {}'.format(
            params))
    reader = DataReader(download_data_path=download_data_path,
                        data_basepath=data_basepath,
                        logs_path=logs_path,
                        plots_path=plots_path,
                        contour_type=contour_type,
                        save_plot=toggle_plot)
    images, masks, metadata = reader.load_samples(reader.sample_tuples)
    loader = DataLoader(output_dir=data_basepath,
                        images=images,
                        masks=masks,
                        metadata=metadata,
                        mini_batch_size=mini_batch_size)
    minibatches = loader.random_mini_batches()

    # If user enabled the toggle_plot to evaluate the reader and loader modules
    if toggle_plot:
        # Check out the overall view of all samples (dicoms, masks) with no shuffle and no partitioning
        logging.debug(
            'Plotting the overall view of all (dicom, mask) samples...')
        reader.plot_samples(images, masks, metadata,
                            'data-reader_no-shuffle_batchset.jpg')

        # Check out first minibatch to see whether it matches the ones in 'data-reader_no-shuffle_batchset.jpg' with same label
        logging.debug(
            'Extracting and plotting the first minibatch to validate DataLoader against the previous plot from DataReader...'
        )
        for i, minibatch in enumerate(minibatches):
            if i > 1:
                break
            minibatch_image, minibatch_mask, minibatch_metadata = minibatch

        # minibatch_image (8,256,256), minibatch_mask (8,256,256), minibatch_metadata (8,)
        reader.plot_samples(minibatch_image, minibatch_mask,
                            minibatch_metadata,
                            'data-loader_shuffled_batchset.jpg')
        logging.info('Finished running preprocessor...')
Example no. 8
def run():
    '''
    Main method of the package.
    '''
    # ------------- LOAD DATA -------------- #
    loader = DataLoader()
    training_set, test_set = loader.leave_one_out(test_index=0)

    # --------------- TRAINING ---------------- #
    trainlandmarks = training_set[1]
    # train a Feature Detection system
    featuredetector = FDTraining()
    # fully automatic:
    featuredetector.search_region = featuredetector.scan_region(trainlandmarks, diff=55, searchStep=20)
    # semi-automatic:
    # featuredetector.search_region = ((880, 1125), (1350, 1670), 20)
    print('---Search space set to', featuredetector.search_region)
    print('Done.')

    # build and train an Active Shape Model
    asm = ASMTraining(training_set, k=3, levels=3)

    # --------------- TESTING ----------------- #
    testimage, testlandmarks = test_set
    # remove some noise from the test image
    testimage = remove_noise(testimage)

    # perform feature matching to find init regions
    print('---Searching for matches...')
    matches = featuredetector.match(testimage)
    print('Done.')

    # or perform manual initialisation (click on center)
    matches = [featuredetector._ellipse(plot.set_clicked_center(testimage))]

    for i in range(len(matches)):
        # search and fit image
        new_fit = asm.activeshape.multiresolution_search(testimage, matches[i], t=10, max_level=2, max_iter=10, n=0.2)
        # Find the target that the new fit represents in order
        # to compute the error. This is done by taking the smallest
        # MSE of all targets.
        mse = np.zeros((testlandmarks.shape[0], 1))
        for j in range(mse.shape[0]):
            mse[j] = mean_squared_error(testlandmarks[j], new_fit)
        best_fit_index = np.argmin(mse)
        # implement maximally tolerable error
        if int(mse[best_fit_index]) < MSE_THRESHOLD:
            print('MSE:', mse[best_fit_index])
            plot.render_shape_to_image(testimage, testlandmarks[best_fit_index], color=(0, 0, 0))
        else:
            print('Bad fit. Needs to restart.')
Example no. 9
def run(sequences, base_path):
    kinect_nodes = ['KINECTNODE1', 'KINECTNODE2', 'KINECTNODE3', 'KINECTNODE4',
                    'KINECTNODE5', 'KINECTNODE6', 'KINECTNODE7', 'KINECTNODE8',
                    'KINECTNODE9', 'KINECTNODE10']
    edges = [
        (0, 1), (0, 2), (1, 15), (15, 16), (0, 3), (3, 4), (4, 5),
        (2, 6), (6, 7), (7, 8), (1, 17), (17, 18), (0, 9), (9, 10),
        (10, 11), (2, 12), (12, 13), (13, 14)
    ]
    failed_count = 0
    total_maps = 0
    for sequence in sequences:
        loader = DataLoader(base_path, sequence)
        mi, ma = loader.min_max()
        ma = mi+25  # TODO : temporary
        for idx in trange(mi, ma-len(kinect_nodes), len(kinect_nodes)):

            shuffle(kinect_nodes)  # TODO : turn back on

            with ProcessPoolExecutor() as executor:
                results = [executor.submit(create_tmap, loader, edges, i, node, idx)
                           for i, node in enumerate(kinect_nodes)]
                for f in as_completed(results):
                    total_maps += 1
                    try:
                        tmap, jmap, d_im = f.result()
                        # 217088*8*(1+19+18)*10
                        # Accumulate enough maps for about 10GB, then save compressed
                        if tmap.size*tmap.itemsize > 10000000000:
                            # Save the map with savez_compressed
                            # Clear accumulation map
                            pass
                        # size*itemsize*(d, j, e)*numkin*(maxidx)
                        # print(f'size: {tmap.size}, items: {tmap.dtype} ({tmap.itemsize} bytes)')
                    except ValueError:
                        failed_count += 1
                        continue

            # for i, node in enumerate(kinect_nodes):
            #     d_im, bodies, camera = loader.frame(idx+i, node)
            #     total_maps += 1
            #     try:
            #         tmap, _ = target_map(bodies, edges, camera, d_im.shape)
            #     except ValueError:
            #         # Failed to get TMAP
            #         failed_count += 1
            #         continue

            # print(kinect_nodes)
    print(f'FAILED: {failed_count}, TOTAL MAPS: {total_maps}')
Example no. 10
    def __init__(self,
                 filename=None,
                 page_duration=1.,
                 nchannels=None,
                 **kwargs):
        if 'position' not in kwargs:
            kwargs['position'] = (400, 300)
        if 'size' not in kwargs:
            kwargs['size'] = (800, 600)
        super(RawDataView, self).__init__(**kwargs)

        self.loader = DataLoader(filename,
                                 page_duration=page_duration,
                                 nchannels=nchannels)

        self.signals = SignalsVisual(self.loader.data)
Example no. 11
def skeleton_visualization_3d(loader: DataLoader, idx: int):
    bodies, univ_time = loader._bodies_univ_time(idx)
    skeletons, xs, ys, zs = read_bodies(bodies)

    fig = plt.figure(figsize=(4, 4))
    ax = fig.add_subplot(111, projection='3d')
    # Plot landmarks
    ax.scatter(xs, ys, zs)
    # Draw lines between edges in each skeleton
    for skeleton in skeletons:
        for edge in edges:
            coords_x = [skeleton[0, edge[0]], skeleton[0, edge[1]]]
            coords_y = [skeleton[1, edge[0]], skeleton[1, edge[1]]]
            coords_z = [skeleton[2, edge[0]], skeleton[2, edge[1]]]
            ax.plot(coords_x, coords_y, coords_z)
    # Ensure equal axis
    max_range = np.array(
        [xs.max() - xs.min(),
         ys.max() - ys.min(),
         zs.max() - zs.min()]).max() / 2.0

    mid_x = (xs.max() + xs.min()) * 0.5
    mid_y = (ys.max() + ys.min()) * 0.5
    mid_z = (zs.max() + zs.min()) * 0.5
    ax.set_xlim(mid_x - max_range, mid_x + max_range)
    ax.set_ylim(mid_y - max_range, mid_y + max_range)
    ax.set_zlim(mid_z - max_range, mid_z + max_range)

    plt.show()
Example no. 12
def show_depth_frame_as_pointcloud(kinect_node, idx, loader: DataLoader):
    depth_image, _, camera = loader.frame(idx, kinect_node)
    points = camera.project_frame(depth_image)

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)
    o3d.visualization.draw_geometries([pcd])
Example no. 13
    def __init__(self):
        config = ConfigLoader()
        self.parameters = config.load_config()
        self.model_folder = setup_log(self.parameters, 'train')

        set_seed(0)

        ###################################
        # Data Loading
        ###################################
        print('\nLoading training data ...')
        self.train_loader = DataLoader(self.parameters['train_data'], self.parameters)
        self.train_loader(embeds=self.parameters['embeds'])
        self.train_data = RelationDataset(self.train_loader, 'train', self.parameters['unk_w_prob'], self.train_loader).__call__()

        print('\nLoading testing data ...')
        test_loader = DataLoader(self.parameters['test_data'], self.parameters)
        test_loader()
        self.test_data = RelationDataset(test_loader, 'test', self.parameters['unk_w_prob'], self.train_loader).__call__()
Example no. 14
def test(parameters):
    print('*** Testing Model ***')
    model_folder = setup_log(parameters, 'test')

    print('Loading mappings ...')
    with open(os.path.join(model_folder, 'mappings.pkl'), 'rb') as f:
        loader = pkl.load(f)

    print('Loading testing data ...')
    test_loader = DataLoader(parameters['test_data'], parameters)
    test_loader.__call__()
    test_data = RelationDataset(test_loader, 'test', parameters['unk_w_prob'],
                                loader).__call__()

    m = Trainer({
        'train': [],
        'test': test_data
    }, parameters, loader, model_folder)
    trainer = load_model(model_folder, m)
    trainer.eval_epoch(final=True, save_predictions=True)
Example no. 15
def Train():
    global loader, training_set, test_set, trainlandmarks, pca
    # ------------- LOAD DATA -------------- #
    loader = DataLoader()
    training_set, test_set = loader.leave_one_out(test_index=0)

    # --------------- TRAINING ---------------- #
    trainlandmarks = training_set[1]

    # build and train an Active Shape Model
    asm = ASMTraining(training_set, k=3, levels=3)
    pca = asm.activeshape.pdmodel

    t = 0
    for i in range(len(pca.eigenvalues)):
        if sum(pca.eigenvalues[:i]) / sum(pca.eigenvalues) < 0.98:
            t = t + 1
        else:
            break

    print("Constructed model with {0} modes of variation".format(t))
Example no. 16
    def __init__(self, filename=None, page_duration=1., nchannels=None,
                 **kwargs):
        if 'position' not in kwargs:
            kwargs['position'] = (400, 300)
        if 'size' not in kwargs:
            kwargs['size'] = (800, 600)
        super(RawDataView, self).__init__(**kwargs)

        self.loader = DataLoader(filename, page_duration=page_duration,
                                 nchannels=nchannels)

        self.signals = SignalsVisual(self.loader.data)
Example no. 17
class RawDataView(PanZoomCanvas):
    def __init__(self,
                 filename=None,
                 page_duration=1.,
                 nchannels=None,
                 **kwargs):
        if 'position' not in kwargs:
            kwargs['position'] = (400, 300)
        if 'size' not in kwargs:
            kwargs['size'] = (800, 600)
        super(RawDataView, self).__init__(**kwargs)

        self.loader = DataLoader(filename,
                                 page_duration=page_duration,
                                 nchannels=nchannels)

        self.signals = SignalsVisual(self.loader.data)

    def on_mouse_wheel(self, event):
        super(RawDataView, self).on_mouse_wheel(event)
        if event.modifiers == (keys.CONTROL, ):
            sign = np.sign(event.delta[1])
            self.signals.signal_scale = np.clip(self.signals.signal_scale \
                                                *1.2**sign,
                                                1e-2, 1e2)

    def on_key_press(self, event):
        super(RawDataView, self).on_key_press(event)
        if event.key == 'Left':
            self.signals.data = self.loader.previous()
            self.update()
        elif event.key == 'Right':
            self.signals.data = self.loader.next()
            self.update()
        elif event.key == 'Home':
            self.signals.data = self.loader.first()
            self.update()
        elif event.key == 'End':
            self.signals.data = self.loader.last()
            self.update()
Example no. 18
def main(args):
    # Load input file, prepare training and validation sets
    data_loader = DataLoader(args.input, args.pre_emb, args.dim_word,
                             args.batch_size, args.lowercase, args.zeros)

    # Save vocabularies
    with open(os.path.join(args.output, 'words_vocab.pkl'), 'wb') as f:
        cPickle.dump(data_loader.word_to_id, f)
    with open(os.path.join(args.output, 'char_vocab.pkl'), 'wb') as f:
        cPickle.dump(data_loader.char_to_id, f)
    with open(os.path.join(args.output, 'tag_vocab.pkl'), 'wb') as f:
        cPickle.dump(data_loader.tag_to_id, f)
    # Save parameters
    with open(os.path.join(args.output, 'args.json'), 'wb') as f:
        cPickle.dump(args, f)

    # Build model
    model = Model(args, data_loader)

    best_score = 0
    niter_without_improvement = 0
    for epoch in range(args.nepochs):
        print("Epoch {:} out of {:}".format(epoch + 1, args.nepochs))
        data_loader.reset_pointer()
        score = model.run_epoch(epoch)
        args.learning_rate *= args.decay_rate
        # early stopping and saving best parameters
        if score >= best_score:
            niter_without_improvement = 0
            model.save_session(args.output)
            best_score = score
            print("New best score: {}".format(score))
        else:
            niter_without_improvement += 1
            if niter_without_improvement >= args.early_stopping:
                print("Early stopping {} epochs without improvement".format(
                    niter_without_improvement))
                break
Example no. 19
def test(parameters):
    model_folder = setup_log(parameters, 'test')

    print('\nLoading mappings ...')
    train_loader = load_mappings(model_folder)
    
    print('\nLoading testing data ...')
    test_loader = DataLoader(parameters['test_data'], parameters)
    test_loader()    
    test_data = DocRelationDataset(test_loader, 'test', parameters, train_loader).__call__() 

    m = Trainer(train_loader, parameters, {'train': [], 'test': test_data}, model_folder)
    trainer = load_model(model_folder, m)
    trainer.eval_epoch(final=True, save_predictions=True)
Example no. 20
class RawDataView(PanZoomCanvas):
    def __init__(self, filename=None, page_duration=1., nchannels=None,
                 **kwargs):
        if 'position' not in kwargs:
            kwargs['position'] = (400, 300)
        if 'size' not in kwargs:
            kwargs['size'] = (800, 600)
        super(RawDataView, self).__init__(**kwargs)

        self.loader = DataLoader(filename, page_duration=page_duration,
                                 nchannels=nchannels)

        self.signals = SignalsVisual(self.loader.data)

    def on_mouse_wheel(self, event):
        super(RawDataView, self).on_mouse_wheel(event)
        if event.modifiers == (keys.CONTROL,):
            sign = np.sign(event.delta[1])
            self.signals.signal_scale = np.clip(self.signals.signal_scale \
                                                *1.2**sign,
                                                1e-2, 1e2)

    def on_key_press(self, event):
        super(RawDataView, self).on_key_press(event)
        if event.key == 'Left':
            self.signals.data = self.loader.previous()
            self.update()
        elif event.key == 'Right':
            self.signals.data = self.loader.next()
            self.update()
        elif event.key == 'Home':
            self.signals.data = self.loader.first()
            self.update()
        elif event.key == 'End':
            self.signals.data = self.loader.last()
            self.update()
Example no. 21
def main():
    torch.manual_seed(42)

    parser = argparse.ArgumentParser()
    parser.add_argument('path', type=os.path.abspath)
    parser.add_argument('--dataset', default="div2k", type=str)
    parser.add_argument('--transform', default=None, type=str)
    parser.add_argument('--gpu', default=0, type=int)

    args = parser.parse_args()

    torch.cuda.set_device(args.gpu)

    validation = DataLoader(os.path.join("data", args.dataset, "val"),
                            shuffle=False,
                            num_workers=0)
    model = SteganoGAN.load(path=args.path)
    metrics = {field: list() for field in METRIC_FIELDS}
    model._validate(validation, metrics, transform=args.transform)
    metrics = {k: torch.tensor(v).mean().item() for k, v in metrics.items()}
    print(metrics)
Example no. 22
def load_data(data_loc):
    """
    Load the data from an external excel resource.

    Parameters
    ----------
    data_loc: str
        Path to the data.

    Returns
    -------
    pd.DataFrame
        Data frame containing the data with additional pre and post phases
        added.
    """
    # load raw data
    loader = DataLoader(f_name=data_loc, s_name="Blad1")
    loaded_data, _ = loader.load(quick_loading=True)

    # Select columns
    if 'phase' in loaded_data.columns:
        loaded_data = loaded_data[[
            'DateTime', 'UserId', 'ExerciseId', 'LOID', 'Correct',
            'AbilityAfterAnswer', 'Effort', 'Lesson', 'LessonProgress', 'phase'
        ]]
    else:
        loaded_data = loaded_data[[
            'DateTime', 'UserId', 'ExerciseId', 'LOID', 'Correct',
            'AbilityAfterAnswer', 'Effort', 'Lesson', 'LessonProgress'
        ]]

    # Sort data
    loaded_data = loader.sort_data_by(loaded_data,
                                      ["DateTime", "LessonProgress"])

    # Filter unneeded
    loaded_data = loader.filter(filters, df=loaded_data)
    if not loader.quick_loaded:
        loaded_data = PhaseFinder().find_gynzy_phases_with_lesson_info(
            loaded_data, "")
        loader.quick_save(loaded_data)
    return loaded_data
Example no. 23
    def train(self):
        train_loader = DataLoader(
            os.path.join(self.config['global']['folders']['datasets'],
                         self.config['global']['files']['datasets']['train']))

        validation_loader = DataLoader(os.path.join(
            self.config['global']['folders']['datasets'],
            self.config['global']['files']['datasets']['validation']),
                                       random=False)

        h = self.model.fit_generator(train_loader.flow(self.batch),
                                     samples_per_epoch=self.samples,
                                     nb_epoch=self.epochs,
                                     validation_data=validation_loader.flow(
                                         self.batch),
                                     nb_val_samples=validation_loader.size)

        self.dump(h.history)
Example no. 24
    def __init__(self):
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        self.dataset_name = 'chokepoint'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))

        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        self.gf = 32
        self.df = 64

        self.lambda_c = 10.0                    
        self.lambda_id = 0.1 * self.lambda_c    

        optimizer = Adam(0.0002, 0.5)

        self.d_sim = self.build_discriminator()
        self.d_target = self.build_discriminator()
        self.d_sim.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])
        self.d_target.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])


        self.g_R1 = self.build_refiner()
        self.g_R2 = self.build_refiner()

        img_sim = Input(shape=self.img_shape)
        img_target = Input(shape=self.img_shape)

        refined_target = self.g_R1(img_sim)
        refined_sim = self.g_R2(img_target)

        rec_sim = self.g_R2(refined_target)
        rec_target = self.g_R1(refined_sim)

        img_sim_id = self.g_R2(img_sim)
        img_target_id = self.g_R1(img_target)


        self.d_sim.trainable = False
        self.d_target.trainable = False


        valid_sim = self.d_sim(refined_sim)
        valid_target = self.d_target(refined_target)


        self.combined = Model(inputs=[img_sim, img_target],
                              outputs=[ valid_sim, valid_target,
                                        rec_sim, rec_target,
                                        img_sim_id, img_target_id ])
        self.combined.compile(loss=['mse', 'mse',
                                    'mae', 'mae',
                                    'mae', 'mae'],
                            loss_weights=[  1, 1,
                                            self.lambda_c, self.lambda_c,
                                            self.lambda_id, self.lambda_id ],
                            optimizer=optimizer)
Example no. 25
class CGAN():
    def __init__(self):
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        self.dataset_name = 'chokepoint'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))

        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        self.gf = 32
        self.df = 64

        self.lambda_c = 10.0                    
        self.lambda_id = 0.1 * self.lambda_c    

        optimizer = Adam(0.0002, 0.5)

        self.d_sim = self.build_discriminator()
        self.d_target = self.build_discriminator()
        self.d_sim.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])
        self.d_target.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])


        self.g_R1 = self.build_refiner()
        self.g_R2 = self.build_refiner()

        img_sim = Input(shape=self.img_shape)
        img_target = Input(shape=self.img_shape)

        refined_target = self.g_R1(img_sim)
        refined_sim = self.g_R2(img_target)

        rec_sim = self.g_R2(refined_target)
        rec_target = self.g_R1(refined_sim)

        img_sim_id = self.g_R2(img_sim)
        img_target_id = self.g_R1(img_target)


        self.d_sim.trainable = False
        self.d_target.trainable = False


        valid_sim = self.d_sim(refined_sim)
        valid_target = self.d_target(refined_target)


        self.combined = Model(inputs=[img_sim, img_target],
                              outputs=[ valid_sim, valid_target,
                                        rec_sim, rec_target,
                                        img_sim_id, img_target_id ])
        self.combined.compile(loss=['mse', 'mse',
                                    'mae', 'mae',
                                    'mae', 'mae'],
                            loss_weights=[  1, 1,
                                            self.lambda_c, self.lambda_c,
                                            self.lambda_id, self.lambda_id ],
                            optimizer=optimizer)

    def build_refiner(self):

        def conv2d(layer_input, filters, f_size=4):
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            d = InstanceNormalization()(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            u = Concatenate()([u, skip_input])
            return u


        d0 = Input(shape=self.img_shape)


        d1 = conv2d(d0, self.gf)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)


        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)

        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)

        return Model(d0, output_img)

    def build_discriminator(self):

        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d

        img = Input(shape=self.img_shape)

        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model(img, validity)

    def train(self, epochs, batch_size=1, interval=50):

        start_time = datetime.datetime.now()

        valid = np.ones((batch_size,) + self.disc_patch)
        refined = np.zeros((batch_size,) + self.disc_patch)

        for epoch in range(epochs):
            for batch_i, (imgs_sim, imgs_target) in enumerate(self.data_loader.load_batch(batch_size)):

                refined_target = self.g_R1.predict(imgs_sim)
                refined_sim = self.g_R2.predict(imgs_target)

                dsim_loss_real = self.d_sim.train_on_batch(imgs_sim, valid)
                dsim_loss_refined = self.d_sim.train_on_batch(refined_sim, refined)
                dsim_loss = 0.5 * np.add(dsim_loss_real, dsim_loss_refined)

                dtarget_loss_real = self.d_target.train_on_batch(imgs_target, valid)
                dtarget_loss_refined = self.d_target.train_on_batch(refined_target, refined)
                dtarget_loss = 0.5 * np.add(dtarget_loss_real, dtarget_loss_refined)

                d_loss = 0.5 * np.add(dsim_loss, dtarget_loss)

                g_loss = self.combined.train_on_batch([imgs_sim, imgs_target],
                                                        [valid, valid,
                                                        imgs_sim, imgs_target,
                                                        imgs_sim, imgs_target])

                elapsed_time = datetime.datetime.now() - start_time

                print ("[Epoch %d/%d] [targetatch %d/%d] [DR loss: %f, acc: %3d%%] [R loss: %05f, adv: %05f, DF: %05f, id: %05f] time: %s " \
                                                                        % ( epoch, epochs,
                                                                            batch_i, self.data_loader.n_batches,
                                                                            d_loss[0], 100*d_loss[1],
                                                                            g_loss[0],
                                                                            np.mean(g_loss[1:3]),
                                                                            np.mean(g_loss[3:5]),
                                                                            np.mean(g_loss[5:6]),
                                                                            elapsed_time))

                if batch_i % interval == 0:
                    self.sample_images(epoch, batch_i)

    def sample_images(self, epoch, batch_i):
        os.makedirs('output/%s' % self.dataset_name, exist_ok=True)
        r, c = 1, 3

        imgs_sim = self.data_loader.load_data(domain="sim", batch_size=1, is_testing=True)
        imgs_target = self.data_loader.load_data(domain="target", batch_size=1, is_testing=True)

        refined_target = self.g_R1.predict(imgs_sim)
        refined_sim = self.g_R2.predict(imgs_target)

        rec_sim = self.g_R2.predict(refined_target)
        rec_target = self.g_R1.predict(refined_sim)

        gen_imgs = np.concatenate([imgs_sim, refined_target, rec_sim, imgs_target, refined_sim, rec_target])

        gen_imgs = 0.5 * gen_imgs + 0.5

        titles = ['Simulated', 'Refined','Target']
        fig, axs = plt.subplots(r, c)

        axs[0].imshow(gen_imgs[0])
        axs[0].set_title(titles[0])
        axs[0].axis('off')

        axs[1].imshow(gen_imgs[1])
        axs[1].set_title(titles[1])
        axs[1].axis('off')

        axs[2].imshow(gen_imgs[3])
        axs[2].set_title(titles[2])
        axs[2].axis('off')

        fig.savefig("output/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i))
        plt.close()
Example no. 26
def load(ql, f_name="./res/leerpaden_app.xlsx", id_="simone"):
    print("Loading data")
    loader = DataLoader(f_name=f_name, s_name="Blad1")
    data, transfer_data = loader.load(quick_loading=ql)
    log_data = None
    if id_ not in ["test"]:
        log_data = loader.load_log()
    if loader.quick_loaded is False:
        print("Organizing data")
        # data["DateTime"] = loader.combine_date_time(data["SubmitDate"],
        #                                             data["Time"])

        if id_ in [
                "kb_all", "kb_all_attempts_curve", "kb_smoothed_curves", "jm"
        ]:
            data = data[[
                'DateTime', 'UserId', 'ExerciseId', 'LOID', 'Correct',
                'AbilityAfterAnswer', 'Effort', 'Lesson', 'LessonProgress'
            ]]
        else:
            data = data[[
                'DateTime', 'UserId', 'ExerciseId', 'LOID', 'Correct',
                'AbilityAfterAnswer'
            ]]
        print("Preprocessing data")
        if id_ not in ["kb", "kb_all"]:
            if "LessonProgress" in data.columns:
                unfiltered = loader.sort_data_by(
                    data, ["DateTime", "LessonProgress"])
            else:
                unfiltered = loader.sort_data_by(data, "DateTime")
        else:
            unfiltered = data
        transfer_data = loader.first_attempts_only(
            ['UserId', 'ExerciseId', 'LOID'], df=transfer_data, copy_df=False)
        data = loader.filter(filters, df=unfiltered)
        # print(data.head())
        if id_ in [
                "karlijn_en_babette",
                "kb",
                "kb_all",
                "test",
                "jm",
        ]:
            data = PhaseFinder().find_gynzy_phases(data, id_)
        elif id_ in [
                "kb_all_attempts_curve",
                "kb_smoothed_curves",
        ]:
            data = PhaseFinder().find_gynzy_phases_with_lesson_info(data, id_)
        else:
            data = PhaseFinder().find_phases(data)
            data = correct(data)
        loader.quick_save(transfer_data, f_name="quicktransfer.pkl")
        loader.quick_save(data)
    first_att_data = loader.first_attempts_only(
        ['UserId', 'ExerciseId', 'LOID'], df=data)
    # print(data.loc[data.UserId == 59491].tail(40).values)
    return data, first_att_data, transfer_data, log_data
Example no. 27
cost = T.mean(ctc.cpu_ctc_th(network_output, input_lens, output, output_lens))
grads = T.grad(cost, wrt=network_output)
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.adam(cost, all_params, 0.001)

train = theano.function([l_in.input_var, input_lens, output, output_lens],
                        cost,
                        updates=updates)
predict = theano.function([l_in.input_var], network_output)
get_grad = theano.function([l_in.input_var, input_lens, output, output_lens],
                           grads)

from loader import DataLoader

data_loader = DataLoader(mbsz=mbsz,
                         min_len=min_len,
                         max_len=max_len,
                         num_classes=num_classes)

i = 1
while True:
    i += 1
    print(i)
    sample = data_loader.sample()
    cost = train(*sample)
    out = predict(sample[0])
    print(cost)
    print("input", sample[0][0].argmax(1))
    print("prediction", out[:, 0].argmax(1))
    print("expected", sample[2][:sample[3][0]])
    if i == 10000:
        grads = get_grad(*sample)
Example no. 28
    # 1-1) training model
    if args.train > 1:
        # read training data
        print("Reading training data...")
        seqs, labels = read_all(args.dir)
        for i in range(len(seqs)):
            seqs[i] = torch.tensor(seqs[i]).unsqueeze(0).float()
        le = LabelEncoder()
        le = le.fit(labels)
        labels_en = le.transform(labels)
        print("-->Complete reading training data")
        print("-->num of training data:", len(labels))
        print(labels[0])
        print(seqs[0])
        print(seqs[0].shape)
        train_loader = DataLoader(length=1024, batch_size=64, n_batches=1000)
        train_loader(labels_en, seqs, labels_en)
        print(len(train_loader))

        # train model
        print("\nTraining model...")
        model = Discriminator(1024, len(labels)).float().to(device)
        optimizer = optim.Adam(model.parameters(), lr=args.rate)

        for epoch in range(args.epoch):
            train(model, device, train_loader, optimizer, epoch + 1)
            #val_loss = test(model, device, test_loader)
            print("")

    # 1-2) using trained model
    if args.train < 1:
Example no. 29
import tensorflow as tf
from model import Model
from loader import DataLoader
from flags import *
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

FLAGS = tf.app.flags.FLAGS

print('<Creating flags>')
create_flags()

print(f'<Loading data - {FLAGS.dataset}>')
dl = DataLoader(FLAGS.dataset)
dl.load()

print('<Defining model>')
tf.reset_default_graph()
model = Model()

print('<Testing model>')
print(f'Using IBP: {FLAGS.use_ibp}')
print(f'Test epsilon: {FLAGS.test_eps}')
model.test(dl, -1, use_ibp=FLAGS.use_ibp, test_eps=FLAGS.test_eps)
Example no. 30
def main(args):
    #initialize dataset class
    ldr = DataLoader(mode=0,
                     seed=args.seed,
                     path=args.dataset,
                     drp_percent=args.drp_impt)
    data_loader = torch.utils.data.DataLoader(ldr,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              drop_last=False)
    num_neurons = int(ldr.train[0].shape[0])

    #Initialize normalizing flow model neural network and its optimizer
    flow = util.init_flow_model(num_neurons, args.num_nf_layers, InterpRealNVP,
                                ldr.train[0].shape[0], args)
    nf_optimizer = torch.optim.Adam(
        [p for p in flow.parameters() if p.requires_grad == True], lr=args.lr)

    #Initialize latent space neural network and its optimizer
    num_hidden_neurons = [
        int(ldr.train[0].shape[0]),
        int(ldr.train[0].shape[0]),
        int(ldr.train[0].shape[0]),
        int(ldr.train[0].shape[0]),
        int(ldr.train[0].shape[0])
    ]
    nn_model = LatentToLatentApprox(int(ldr.train[0].shape[0]),
                                    num_hidden_neurons).float()
    if args.use_cuda:
        nn_model.cuda()
    nn_optimizer = torch.optim.Adam(
        [p for p in nn_model.parameters() if p.requires_grad == True],
        lr=args.lr)

    reset_scheduler = 2

    if args.dataset == 'news':
        print("\n****************************************")
        print("Starting OnlineNewsPopularity experiment\n")
    elif args.dataset == 'mnist':
        print("\n*********************************")
        print("Starting MNIST dropout experiment\n")
    else:
        print("Invalid dataset error")
        sys.exit()

    #Train and test MCFlow
    for epoch in range(args.n_epochs):
        util.endtoend_train(flow, nn_model, nf_optimizer, nn_optimizer,
                            data_loader, args)  #Train the MCFlow model

        with torch.no_grad():
            ldr.mode = 1  #Use testing data
            te_mse, _ = util.endtoend_test(flow, nn_model, data_loader,
                                           args)  #Test MCFlow model
            ldr.mode = 0  #Use training data
            print("Epoch", epoch, " Test RMSE", te_mse**.5)

        if (epoch + 1) % reset_scheduler == 0:
            #Reset unknown values in the dataset using predicted estimates
            if args.dataset == 'mnist':
                ldr.reset_img_imputed_values(nn_model, flow, args.seed, args)
            else:
                ldr.reset_imputed_values(nn_model, flow, args.seed, args)
            flow = util.init_flow_model(
                num_neurons, args.num_nf_layers, InterpRealNVP,
                ldr.train[0].shape[0],
                args)  #Initialize brand new flow model to train on new dataset
            nf_optimizer = torch.optim.Adam(
                [p for p in flow.parameters() if p.requires_grad == True],
                lr=args.lr)
            reset_scheduler = reset_scheduler * 2
Example no. 31
    vocab_X = data_npz['vocab_X'].item()
    vocab_Y = data_npz['vocab_Y'].item()

    model_path = model_dir / f'model_{args.epoch:03d}.pth'
    model = EncoderDecoder(**args_params)
    model.load_state_dict(torch.load(model_path.as_posix()))
    print(f'loaded model from {model_path}', file=sys.stderr)

    test_X = []
    test_max_length = 0
    for sentence in load_data('../data/chap3/test.en'):
        test_X.append(sentence_to_ids(vocab_X, sentence))
        test_max_length = max(test_max_length, len(test_X[-1]))

    test_dataloader = DataLoader(test_X, test_X, 1, shuffle=False)

    pred_Y = []
    for batch in test_dataloader:
        batch_X, _, lengths_X = batch
        pred = model(batch_X, lengths_X, max_length=lengths_X[0])
        pred = pred.max(dim=-1)[1].view(-1).data.cpu().numpy().tolist()
        if word2id['<EOS>'] in pred:
            pred = pred[:pred.index(word2id['<EOS>'])]
        pred_y = [vocab_Y.id2word[_id] for _id in pred]
        pred_Y.append(pred_y)

    with open('./submission.csv', 'w') as f:
        writer = csv.writer(f, delimiter=' ', lineterminator='\n')
        writer.writerows(pred_Y)
Example no. 32
def main(args):
    if args.gpu is not None:
        print('Using GPU %d' % args.gpu)
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    else:
        print('CPU mode')

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(227),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(227),
        #transforms.RandomResizedCrop(227),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # DataLoader initialize
    train_data = DataLoader(args.pascal_path,
                            'trainval',
                            transform=train_transform)
    t_trainloader = torch.utils.data.DataLoader(dataset=train_data,
                                                batch_size=args.batch,
                                                shuffle=True,
                                                num_workers=CORES,
                                                pin_memory=True)
    print('[DATA] Target Train loader done!')
    val_data = DataLoader(args.pascal_path,
                          'test',
                          transform=val_transform,
                          random_crops=args.crops)
    t_testloader = torch.utils.data.DataLoader(dataset=val_data,
                                               batch_size=args.batch,
                                               shuffle=False,
                                               num_workers=CORES,
                                               pin_memory=True)
    print('[DATA] Target Test loader done!')

    if not args.test:
        s_trainset = torchvision.datasets.ImageFolder(
            args.imgnet_path,
            transform=transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomResizedCrop(227),
                transforms.ToTensor(), normalize
            ]))
        s_trainloader = torch.utils.data.DataLoader(dataset=s_trainset,
                                                    batch_size=5 * args.batch,
                                                    shuffle=False,
                                                    num_workers=CORES,
                                                    pin_memory=True)
        print('[DATA] Source Train loader done!')

    N = len(train_data.names)
    iter_per_epoch = N / args.batch

    model = Network(num_classes=21)
    g_model = Network(num_classes=21)
    d_model = disnet()

    if args.gpu is not None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print('[MODEL] CUDA DEVICE : {}'.format(device))

        model.to(device)
        g_model.to(device)
        d_model.to(device)

    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=0.0001)
    g_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                         g_model.parameters()),
                                  lr=args.lr,
                                  momentum=0.9,
                                  weight_decay=0.0001)
    d_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                         d_model.parameters()),
                                  lr=args.lr,
                                  momentum=0.9,
                                  weight_decay=0.0001)

    if args.model is not None:
        checkpoint = torch.load(args.model)
        model.load(checkpoint['model'], True)
        g_model.load(checkpoint['g_model'], True)
        d_model.load_state_dict(checkpoint['d_model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        g_optimizer.load_state_dict(checkpoint['g_optimizer'])
        d_optimizer.load_state_dict(checkpoint['d_optimizer'])

    ############## TRAINING ###############
    print('Start training: lr %f, batch size %d' % (args.lr, args.batch))
    print('Checkpoint: ' + args.checkpoint)

    # Train the Model
    steps = args.iter_start
    best_mAP = 0.0
    best_path = './{}/model-{}_pretrained-{}_lr-0pt001_lmd_s-{}_acc-{}.pth'.format(
        args.checkpoint, 'alexnet', 'False', args.lmd_s, '{}')

    if args.test:
        args.epochs = 1

    for epoch in range(int(iter_per_epoch * args.iter_start), args.epochs):
        if not args.test:
            adjust_learning_rate(optimizer,
                                 epoch,
                                 init_lr=args.lr,
                                 step=100,
                                 decay=0.1)
            adjust_learning_rate(g_optimizer,
                                 epoch,
                                 init_lr=args.lr / 2,
                                 step=100,
                                 decay=0.1)
            adjust_learning_rate(d_optimizer,
                                 epoch,
                                 init_lr=args.lr / 1.5,
                                 step=100,
                                 decay=0.1)

            done = train(epoch, model, g_model, d_model, optimizer,
                         g_optimizer, d_optimizer, t_trainloader,
                         s_trainloader, args.lmd_s, device)

        best_mAP = test(epoch, model, g_model, d_model, optimizer, g_optimizer,
                        d_optimizer, t_testloader, best_mAP, best_path, device)
Example no. 33
handlers = [
    logging.FileHandler(os.path.join(opt.save_dir, 'output.log'), mode='w'),
    logging.StreamHandler()
]
logging.basicConfig(handlers=handlers, level=logging.INFO, format='')
logger = logging.getLogger()

NOISE_DIM = 100
NF = opt.nf
N_EMB = opt.nemb

if __name__ == '__main__':
    L = DataLoader(data_dir='data/',
                   n_emb=N_EMB,
                   method=opt.method,
                   batch_size=opt.batch,
                   shuffle=True,
                   validation_split=0.0)
    model, trainer = None, None
    if opt.method == 'cgan':
        model = CGAN
        trainer = CGANTrainer
    elif opt.method == 'acgan':
        model = ACGAN
        trainer = ACGANTrainer
    elif opt.method == 'wcgan':
        model = WCGAN
        trainer = WCGANTrainer

    G = model.Generator(noise_dim=NOISE_DIM, condition_dim=N_EMB, nf=NF)
    D = model.Discriminator(noise_dim=NOISE_DIM, condition_dim=N_EMB, nf=NF)
Example no. 34
l_out = lasagne.layers.ReshapeLayer(h3, ((max_len, mbsz, num_classes)))

network_output = lasagne.layers.get_output(l_out)

cost = T.mean(ctc.cpu_ctc_th(network_output, input_lens, output, output_lens))
grads = T.grad(cost, wrt=network_output)
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.adam(cost, all_params, 0.001)

train = theano.function([l_in.input_var, input_lens, output, output_lens], cost, updates=updates)
predict = theano.function([l_in.input_var], network_output)
get_grad = theano.function([l_in.input_var, input_lens, output, output_lens], grads)

from loader import DataLoader

data_loader = DataLoader(mbsz=mbsz, min_len=min_len, max_len=max_len, num_classes=num_classes)

i = 1
while True:
    i += 1
    print(i)
    sample = data_loader.sample()
    cost = train(*sample)
    out = predict(sample[0])
    print(cost)
    print("input", sample[0][0].argmax(1))
    print("prediction", out[:, 0].argmax(1))
    print("expected", sample[2][:sample[3][0]])
    if i == 10000:
        grads = get_grad(*sample)
        import ipdb