Example 1
    def __init__(self,
                 trainer,
                 docs,
                 data,
                 message,
                 replay_memory=None,
                 beta=0,
                 docs_per_iteration=10000):
        self.trainer = trainer
        self.data = data
        self.model = trainer.model
        self.message = message
        self.replay_memory = replay_memory
        self.beta = beta
        self.loss_aggregator = Aggregator()
        self.evaluators = [
            evaluation.Evaluator(metric=evaluation.muc),
            evaluation.Evaluator(metric=evaluation.b_cubed),
            evaluation.Evaluator(metric=evaluation.ceafe)
        ]
        self.merged_pairs = {}
        self.training = self.replay_memory is not None

        print(self.message)
        random.shuffle(docs)
        if self.training:
            docs = docs[:docs_per_iteration]
        prog = util.Progbar(len(docs))
        for i, (doc, actionstate) in enumerate(docs):
            self.trainer.doc = doc
            self.trainer.actionstate = actionstate

            if len(actionstate.possible_pairs) != 0:
                actionstate.load(self.data, self.trainer.pair_model,
                                 self.trainer.anaphoricity_model)
                s = State(doc, actionstate)
                doc_merged_pairs = self.run_agent(s, beta, i)
                for evaluator in self.evaluators:
                    evaluator.update(doc)
                self.merged_pairs[doc.did] = doc_merged_pairs
                doc.reset()
                actionstate.clear()

            muc, b3, ceafe = (e.get_f1() for e in self.evaluators)
            exact = [('muc', 100 * muc), ('b3', 100 * b3),
                     ('ceafe', 100 * ceafe),
                     ('conll', 100 * (muc + b3 + ceafe) / 3),
                     ('loss', self.loss_aggregator.get_avg())]
            prog.update(i + 1, exact=exact)
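The CoNLL score reported in the progress bar above is simply the arithmetic mean of the MUC, B³, and CEAFe F1 scores. A minimal standalone sketch (the three F1 values are assumed for illustration):

# Sketch of the CoNLL score computed above; the F1 values are assumed.
muc, b3, ceafe = 0.70, 0.65, 0.60
conll = 100 * (muc + b3 + ceafe) / 3
print('conll: {:.1f}'.format(conll))  # conll: 65.0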
Example 2
def run_2d_plot():
	# sim = simulation.TwoClassSimulation('data.json')
	sim = simulation.TwoClassSimulation('data_normalized.json')
	evl = evaluation.Evaluator(sim)
	res = evl.collect_results(n_tries)
	evl.export_results_2d(res, 'simulation_results.xlsx')
	evl.plot_results_2d(res, show_opt=True)
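Examples 2, 4, and 8 appear to come from the same simulation module and rely on a module-level `n_tries` that is not shown; a minimal driver sketch (the value 1000 is an assumption, not taken from the source):

# Hypothetical driver for Examples 2/4/8; `n_tries` is a module-level
# global in the source and its value here is an assumption.
n_tries = 1000

if __name__ == '__main__':
    run_2d_plot()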
Example 3
def fine_tune(model,
              X_tr,
              Xtr_ca,
              y_tr,
              X_dev,
              y_dev,
              Xdev_ca,
              tgt_test,
              tag2idx_tgt,
              w2i,
              tags_tgt,
              max_len,
              epochs=100):
    """ Fine-tune and predict.

    """
    np.random.seed(42)
    random.seed(12345)
    tf.set_random_seed(1234)

    history = lc.fit_model(model, X_tr, Xtr_ca, y_tr, X_dev, y_dev, Xdev_ca,
                           epochs)
    print('Evaluating...')
    pred = lc.predict(model, tgt_test, tag2idx_tgt, w2i, tags_tgt, max_len)
    score = evaluation.Evaluator(pred, tgt_test, CONLLTAGSET)
    return history, score
Example 4
def run_3d_plot():
	sim = simulation.MultiClassSimulation()
	evl = evaluation.Evaluator(sim)
	res = evl.collect_results(n_tries)
	evl.export_results_3d(res, 'simulation_results.txt')
	res = helpers.csv_file_to_tuple_list('simulation_results.txt', float)
	print(evl.compute_opt(res))
	evl.plot_results_3d(res)
Example 5
def fit_and_test_model(max_len, we, w2i, words):
    """ Fit and test a BiLSTM-CRF on the CONLL 2003 corpus. Return both the
    training history and the score (evaluated on the source testing file).

    """
    # NOTE Using the custom train/dev/test split.
    print("Obtaining train data...")
    trainfile = 'experiments/CONLL03_to_GUM/src_data/train.txt'
    testfile = 'experiments/CONLL03_to_GUM/src_data/test.txt'
    validfile = 'experiments/CONLL03_to_GUM/src_data/valid.txt'
    src_train = list(utils.read_NER_output(trainfile))
    src_test = list(utils.read_NER_output(testfile))
    src_dev = list(utils.read_NER_output(validfile))

    tags_src, tag2idx_src = lc.get_tag2idx(src_train + src_test + src_dev)

    X_tr, y_tr, Xtr_ca = lc.prepare_inputs_outputs(src_train, w2i, tag2idx_src,
                                                   max_len)
    X_te, y_te, Xte_ca = lc.prepare_inputs_outputs(src_test, w2i, tag2idx_src,
                                                   max_len)
    X_dev, y_dev, Xdev_ca = lc.prepare_inputs_outputs(src_dev, w2i,
                                                      tag2idx_src, max_len)

    print('Saving the word embeddings for use later.')

    embeddings = {'we': we, 'w2i': w2i, 'l2i': tag2idx_src}

    embedding_utils.pkl_save('models_bilstmcrf/embeddings_1.pkl.gz',
                             [embeddings], "Embeddings")

    model, crf = lc.make_biLSTM_casing_CRF2(len(tag2idx_src),
                                            len(w2i),
                                            max_len,
                                            we,
                                            WORDVEC_DIM=int(WVDIM))

    optimizer = optimizers.SGD(lr=0.005, clipvalue=5.0)

    model.compile(optimizer=optimizer,
                  loss=crf.loss_function,
                  metrics=[crf.accuracy])

    history = lc.fit_model(model, X_tr, Xtr_ca, y_tr, X_dev, y_dev, Xdev_ca,
                           int(WVDIM),
                           "models_bilstmcrf/final_model_100_withcase3.h5")

    print("Finished fitting the model. Going to predict now...")
    pred = lc.predict(model, src_test, tag2idx_src, w2i, tags_src, max_len)
    score = evaluation.Evaluator(pred, src_test, set())

    return history, score
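Example 5 persists its embedding lookup tables via `embedding_utils.pkl_save`. Assuming that helper writes a gzip-compressed pickle of the `[embeddings]` list (an assumption; its code is not shown), a matching loader sketch would be:

import gzip
import pickle

# Sketch of a loader for the embeddings saved above, assuming pkl_save
# writes a gzip-compressed pickle of the [embeddings] list.
def load_embeddings(path='models_bilstmcrf/embeddings_1.pkl.gz'):
    with gzip.open(path, 'rb') as f:
        (embeddings,) = pickle.load(f)
    return embeddings['we'], embeddings['w2i'], embeddings['l2i']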
Example 6
def main(eval_mode: bool, feature_type: str, scene: str, hyper_params: dict,
         network_config: dict, eval_settings: dict, fft_params: dict) -> None:
    """
    Main function that takes hyper-parameters, creates the architecture, trains the model and evaluates it
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    os.makedirs('results', exist_ok=True)
    experiment_id = datetime.now().strftime(
        "%Y%m%d-%H%M%S") + f' - {feature_type} - {scene}'
    writer = SummaryWriter(log_dir=os.path.join('tensorboard', experiment_id))
    shutil.copyfile('config.json', os.path.join(
        'results', 'config.json'))  # save current config file to results
    training_dataset = BaseDataset(feature_type, scene, hyper_params,
                                   fft_params)
    # create network
    classes = util.get_scene_classes(scene)
    plotter = Plotter(classes,
                      hop_size=fft_params['hop_size'],
                      sampling_rate=22050)
    # finalize network config parameters
    network_config['out_features'] = len(classes)
    if feature_type == 'spec':
        network_config['n_features'] = fft_params['n_fft'] // 2 + 1
    elif feature_type == 'mfcc':
        network_config['n_features'] = fft_params['n_mfcc']
    elif feature_type == 'mels':
        network_config['n_features'] = fft_params['n_mels']
    # create network
    net = SimpleCNN(**network_config)
    # Save initial model as "best" model (will be overwritten later)
    model_path = os.path.join('results',
                              f'best_{feature_type}_{scene}_model.pt')
    if not os.path.exists(model_path):
        torch.save(net, model_path)
    else:  # if there already exists a model, just load parameters
        print(f'reusing pre-trained model: "{model_path}"')
        net = torch.load(model_path, map_location=torch.device('cpu'))
    net.to(device)
    # get loss function
    loss_fn = torch.nn.BCELoss()
    # create adam optimizer
    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=hyper_params['learning_rate'],
                                 weight_decay=hyper_params['weight_decay'])

    train_stats_at = eval_settings['train_stats_at']
    validate_at = eval_settings['validate_at']
    best_loss = np.inf  # best validation loss so far
    progress_bar = tqdm.tqdm(total=hyper_params['n_updates'],
                             desc=f"loss: {np.nan:7.5f}",
                             position=0)
    update = 0  # current update counter

    fold_idx = 1  # one random fold (defines split into training and validation set)
    rnd_augment = hyper_params['rnd_augment']
    # create subsets and data loaders
    if eval_mode:
        train_subset = training_dataset
        val_loader = None
    else:
        train_subset = Subset(training_dataset,
                              training_dataset.get_fold_indices(fold_idx)[0])
        val_subset = Subset(training_dataset,
                            training_dataset.get_fold_indices(fold_idx)[1])
        val_set = ExcerptDataset(val_subset,
                                 feature_type,
                                 classes,
                                 hyper_params['excerpt_size'],
                                 fft_params,
                                 overlap_factor=1,
                                 rnd_augment=False)
        val_loader = DataLoader(val_set,
                                batch_size=hyper_params['batch_size'],
                                shuffle=False,
                                num_workers=0)

    train_set = ExcerptDataset(
        train_subset,
        feature_type,
        classes,
        hyper_params['excerpt_size'],
        fft_params,
        overlap_factor=hyper_params['train_overlap_factor'],
        rnd_augment=rnd_augment)
    train_loader = DataLoader(train_set,
                              batch_size=hyper_params['batch_size'],
                              shuffle=True,
                              num_workers=0)

    n_updates = hyper_params['n_updates']
    # main training loop
    while update <= n_updates:
        if rnd_augment and update > 0:
            # regenerate new excerpts (in background) but use current ones for training
            train_set.generate_excerpts()
        for data in train_loader:
            inputs, targets, audio_file, idx = data
            inputs = inputs.to(device, dtype=torch.float32)
            targets = targets.to(device, dtype=torch.float32)
            optimizer.zero_grad()
            predictions = net(inputs)
            loss = loss_fn(predictions, targets)
            loss.backward()
            optimizer.step()

            if update % train_stats_at == 0 and update > 0:
                # log training loss
                writer.add_scalar(tag="training/loss",
                                  scalar_value=loss.cpu(),
                                  global_step=update)

            if not eval_mode and update % validate_at == 0 and update > 0:
                # evaluate model on validation set, log parameters and metrics
                val_loss, metrics, metrics_pp = validate_model(
                    net, val_loader, classes, update, device, plotter)
                print(f'val_loss: {val_loss}')
                f_score = metrics['segment_based']['overall']['F']
                err_rate = metrics['segment_based']['overall']['ER']
                f_score_pp = metrics_pp['segment_based']['overall']['F']
                err_rate_pp = metrics_pp['segment_based']['overall']['ER']
                print(f'f_score: {f_score}')
                print(f'err_rate: {err_rate}')
                print(f'f_score_pp: {f_score_pp}')
                print(f'err_rate_pp: {err_rate_pp}')
                params = net.parameters()
                log_validation_params(writer, val_loss, params, metrics,
                                      metrics_pp, update)
                # Save best model for early stopping
                if val_loss < best_loss:
                    print(
                        f'{val_loss} < {best_loss}... saving as new {os.path.split(model_path)[-1]}'
                    )
                    best_loss = val_loss
                    torch.save(net, model_path)

            if eval_mode:
                # in eval mode, just compare train_loss
                train_loss = loss.cpu()
                if train_loss < best_loss:
                    print(
                        f'{train_loss} < {best_loss}... saving as new {os.path.split(model_path)[-1]}'
                    )
                    best_loss = train_loss
                    torch.save(net, model_path)

            # update progress and update-counter
            progress_bar.set_description(f"loss: {loss:7.5f}", refresh=True)
            progress_bar.update()
            update += 1
            if update >= n_updates:
                break

    progress_bar.close()
    print('finished training.')

    print('starting evaluation...')
    evaluator = evaluation.Evaluator(feature_type, scene, hyper_params,
                                     network_config, fft_params, model_path,
                                     device, writer, plotter)
    evaluator.evaluate()
    print('zipping "results" folder...')
    util.zip_folder('results', f'results_{feature_type}_{scene}')
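Example 6 checkpoints the entire network object with `torch.save(net, model_path)` whenever the loss improves, mirroring the load branch near the top of `main`. A short inference-side sketch (the concrete file name, with feature_type='spec' and scene='street', is an assumed example):

import torch

# Sketch: reload the best checkpoint saved above for inference.
# The concrete file name (feature_type='spec', scene='street') is assumed.
net = torch.load('results/best_spec_street_model.pt',
                 map_location=torch.device('cpu'))
net.eval()  # disable dropout/batch-norm updates before predicting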
Example 7
if __name__ == "__main__":
    torch.manual_seed(1993)
    torch.cuda.manual_seed_all(1993)
    np.random.seed(1993)
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))

    experiment_options = Options()
    global args
    args = experiment_options.args

    # load paths
    with open('paths/main.yaml') as file:
        paths = yaml.load(file, Loader=yaml.FullLoader)
    log_path = paths['log_path']

    # config parameters
    experiment_name = args.experiment_name
    dataset_name = args.dataset_name
    number_of_workers = args.num_workers

    keypoints = test(experiment_options, experiment_name, dataset_name,
                     number_of_workers, log_path)

    evaluator = evaluation.Evaluator(dataset_name, experiment_name, log_path)
    evaluator.Evaluate(keypoints)
Example 8
def run_littlewood_example():
	sim = simulation.TwoClassSimulation('data_normalized.json')
	evl = evaluation.Evaluator(sim)
	evl.plot_littlewood_example()
Example 9
    def __init__(
            self,
            constantSpeedIdx=None,
            vesselName='Fairmaster_2',
            shipLoading='normal',
            ecaFactor=1.5593,
            fuelPrice=300,  # Fuel price per metric tonne
            bathymetry=False,
            inputParameters=None,
            tb=None,
            criteria=None,
            seed=None,
            seeds=None):

        # Set pseudo-random generator seed for testing
        self.seeds = iter(seeds) if seeds is not None else None
        seed = None if self.seeds else seed
        np.random.seed(seed)
        random.seed(seed)

        if criteria is None and constantSpeedIdx is None:
            criteria = _criteria
        else:
            if constantSpeedIdx is None and (criteria['minimalTime']
                                             and criteria['minimalCost']):
                weights = (-1, -1)
            else:
                weights = (-1, )

            creator.create("FitnessMin", base.Fitness, weights=weights)
            creator.create("Individual", list, fitness=creator.FitnessMin)

        # Set parameters
        defaultParameters = {
            # Navigation area parameters
            'avoidAntarctic': True,
            'avoidArctic': True,
            'res': 'i',  # Resolution of shorelines
            'penaltyValue': 1,  # Penalty value for Bathymetry
            'graphDens': 4,  # Recursion level graph
            'graphVarDens': 6,  # Variable recursion level graph
            'splits': 3,  # Threshold for split_polygon (3 yields best performance)

            # MOEA parameters
            'n': 336,  # Population size
            'nBar': 100,  # Local archive size (M-PAES)
            'cxpb': 0.81,  # Crossover probability (NSGAII, SPEA2)
            'mutpb': 0.28,  # Mutation probability (NSGAII, SPEA2)
            'maxMoves': 9,  # Max. number of mutations per selected individual
            'cr_trials': 5,  # Max recombination trials (M-PAES)
            'l_fails': 3,  # Max fails (M-PAES)
            'l_opt': 5,  # Max moves (M-PAES)

            # Stopping parameters
            'maxEvaluations': None,
            'gen': 150,  # Minimal number of generations
            'maxGDs': 40,  # Max length of generational distance list
            'minVar': 1e-6,  # Minimal variance of generational distance list

            # Mutation parameters
            'mutationOperators': ['speed', 'insert', 'move', 'delete'],  # Operators to be included
            'widthRatio': 4.22,  # 7.5e-4 obtained from hyp param tuning
            'radius': 1.35,  # 0.39 obtained from hyp param tuning
            'scaleFactor': 0.1,  # Scale factor for Exponential distribution
            'delFactor': 1.2,  # Factor of deletions
            'gauss': False,  # Use Gaussian mutation for insert and move operators

            # Evaluation parameters
            'segLengthF': 15,  # Length of linear approx. of great circle track for feasibility
            'segLengthC': 8  # Same for ocean currents and wind along route
        }
        self.p = {
            **defaultParameters,
            **inputParameters
        } if inputParameters else defaultParameters
        self.tb = _tb if tb is None else tb
        self.criteria = criteria
        self.procResultsFP = None
        self.vessel = evaluation.Vessel(fuelPrice,
                                        vesselName,
                                        shipLoading,
                                        DIR=DIR)  # Vessel class instance
        self.fuelPrice = fuelPrice
        self.ecaFactor = ecaFactor  # Multiplication factor ECA fuel
        self.geod = geodesic.Geodesic()  # Geodesic class instance

        # Load and pre-process shoreline, ECA, and Bathymetry geometries
        navAreaGenerator = NavigableAreaGenerator(self.p, DIR=DIR)
        landTree = navAreaGenerator.get_shoreline_rtree()
        ecaTree = navAreaGenerator.get_eca_rtree()
        bathTree = navAreaGenerator.get_bathymetry_rtree()

        # Initialize "Evaluator" and register it's functions
        self.evaluator = evaluation.Evaluator(self.vessel,
                                              landTree,
                                              ecaTree,
                                              bathTree if bathymetry else None,
                                              ecaFactor,
                                              self.geod,
                                              criteria,
                                              self.p,
                                              DIR=DIR)
        self.speedIdx = constantSpeedIdx
        # Initialize "Initializer"
        self.initializer = initialization.Initializer(
            self.evaluator, self.vessel, landTree, ecaTree, bathTree,
            self.geod, self.p, creator.Individual, self.speedIdx, DIR, seed)

        # Load previously calculated initial paths
        self.initPathsDir = DIR / 'output/initialRoutes/RES_{}_D{}_VD_{}'.format(
            self.p['res'], self.p['graphDens'], self.p['graphVarDens'])
        if not os.path.exists(self.initPathsDir):
            os.mkdir(self.initPathsDir)
        self.initRoutesList = []
        if constantSpeedIdx is None:  # Initial paths are computed for speedIdx = None
            for fp in os.listdir(self.initPathsDir):
                with open(self.initPathsDir / fp, 'rb') as file:
                    self.initRoutesList.append(pickle.load(file))

        # Initialize "Operator" and register it's functions
        self.operators = Operators(self.evaluator.e_feasible, self.vessel,
                                   self.geod, self.p, seed)
        self.tb.register("mutate", self.operators.mutate)
        self.tb.register("mate", self.operators.cx_one_point)
        self.tb.register("population", initialization.init_repeat_list)
Example 10
def main():
    global args

    args = parser.parse_args()

    use_cuda = cuda_model.ifUseCuda(args.gpu_id, args.multiGpu)

    model = network.TURN(feature_size=args.input_size,
                         mid_layer_size=args.hidden_size,
                         drop=args.dropout)
    print("Number of Params \t{:d}".format(
        sum([p.data.nelement() for p in model.parameters()])))

    if args.resume is not None:
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.resume,
                                map_location=lambda storage, loc: storage)
        args.start_epoch = checkpoint['epoch']
        # args.start_epoch = 0
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        print("=> loading checkpoint '{:s}', epoch: {:d}\n".format(
            args.resume, args.start_epoch))
    else:
        print("Training from srcatch")

    model = cuda_model.convertModel2Cuda(model,
                                         gpu_id=args.gpu_id,
                                         multiGpu=args.multiGpu)

    feature_directory = '/home/zwei/datasets/THUMOS14/features/denseflow'
    train_clip_foreground_path = '/home/zwei/Dev/TURN_TAP_ICCV17/turn_codes/val_training_samples.txt'
    train_clip_background_path = '/home/zwei/Dev/TURN_TAP_ICCV17/turn_codes/background_samples.txt'
    val_clip_path = '/home/zwei/Dev/TURN_TAP_ICCV17/turn_codes/test_swin.txt'

    train_dataset = thumos14_iccv17.TrainDataSet(
        feature_directory=feature_directory,
        foreground_path=train_clip_foreground_path,
        background_path=train_clip_background_path,
        n_ctx=4,
        feature_size=args.input_size)
    val_dataset = thumos14_iccv17.EvaluateDataset(
        feature_directory=feature_directory,
        clip_path=val_clip_path,
        n_ctx=4,
        unit_size=16.,
        feature_size=args.input_size)

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)

    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=4)

    if args.eval:
        evaluator = evaluation.Evaluator(dataloader=val_dataloader,
                                         save_directory=args.branch,
                                         savename=os.path.basename(
                                             args.resume))
        evaluator.evaluate(model, use_cuda=use_cuda)
        sys.exit(0)

    model_optim = optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=args.lr)

    best_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': float('inf'),
        'val_loss': float('inf')
    }
    isBest_status = {
        'train_accuracy': 0,
        'val_accuracy': 0,
        'train_loss': 0,
        'val_loss': 0
    }

    for epoch in range(args.start_epoch, args.nof_epoch):

        total_losses = AverageMeter()
        loc_losses = AverageMeter()
        cls_losses = AverageMeter()
        Accuracy_cls = AverageMeter()
        Accuracy_loc = AverageMeter()

        model.train()
        pbar = progressbar.ProgressBar(max_value=len(train_dataloader))
        for i_batch, sample_batched in enumerate(train_dataloader):
            pbar.update(i_batch)

            feature_batch = Variable(sample_batched[0])
            offset_batch = Variable(sample_batched[1])
            label_batch = Variable(sample_batched[2])
            clip_batch = (sample_batched[3])

            if use_cuda:
                feature_batch = feature_batch.cuda()
                offset_batch = offset_batch.cuda()
                label_batch = label_batch.cuda()
                # clip_batch = clip_batch.cuda()

            if args.normalize > 0:
                feature_batch = F.normalize(feature_batch, p=2, dim=1)

            output_v = model(feature_batch)
            cls_logits, loc_logits, _, _ = network.extract_outputs(output_v)
            cls_loss = network.cls_loss(cls_logits, label_batch.long())
            loc_loss = network.loc_loss(loc_logits, offset_batch, label_batch)

            cls_accuracy = Metrics.accuracy_topN(cls_logits.data,
                                                 label_batch.long().data)
            loc_accuracy, n_valid = Metrics.IoU(clip_batch.numpy(),
                                                loc_logits.data.cpu().numpy(),
                                                label_batch.data.cpu().numpy())

            total_loss = cls_loss + args.plambda * loc_loss

            model_optim.zero_grad()
            total_loss.backward()
            model_optim.step()

            total_losses.update(total_loss.data[0], feature_batch.size(0))
            cls_losses.update(cls_loss.data[0], feature_batch.size(0))
            loc_losses.update(loc_loss.data[0], feature_batch.size(0))
            Accuracy_cls.update(cls_accuracy[0][0], feature_batch.size(0))
            Accuracy_loc.update(loc_accuracy, n_valid)

        print(
            "Train -- Epoch :{:06d}, LR: {:.6f},\tloc-loss={:.4f}\tcls-loss={:.4f}\tCls-Accuracy={:.4f}\tIoU={:.4f}"
            .format(epoch, model_optim.param_groups[0]['lr'], loc_losses.avg,
                    cls_losses.avg, Accuracy_cls.avg, Accuracy_loc.avg))

        if best_status['train_loss'] > total_losses.avg:
            best_status['train_loss'] = total_losses.avg
            isBest_status['train_loss'] = 1
        if best_status['train_accuracy'] < Accuracy_cls.avg:
            best_status['train_accuracy'] = Accuracy_cls.avg
            isBest_status['train_accuracy'] = 1

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'val_loss': best_status['val_loss'],
                'val_accuracy': best_status['val_accuracy'],
                'train_loss': best_status['train_loss'],
                'train_accuracy': best_status['train_accuracy']
            },
            isBest_status,
            file_direcotry=args.branch)

        for item in isBest_status.keys():
            isBest_status[item] = 0
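The `AverageMeter` used throughout Example 10 is not shown; a minimal conventional implementation consistent with the `.update(val, n)` / `.avg` usage above (an assumption, not the project's actual class) looks like:

# Conventional AverageMeter sketch matching the usage above; this is an
# assumption, not the code from the example's repository.
class AverageMeter(object):
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        # `val` is a per-batch average, `n` the number of items it covers
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)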
Example 11
    def fit(self, X, y, X_val=np.array([]), y_val=np.array([]), weights=None,
            eval_weights_train=None, eval_weights_val=None):

        self.X = X
        self.y = y
        self.X_val = X_val
        self.y_val = y_val 
        self.J_train = []
        self.J_val = []
        self.score_train = []
        self.score_val = []  
        
        # init evaluators for the losses
        self.Loss_train = evaluation.Evaluator(self.loss)
        self.Loss_val = evaluation.Evaluator(self.loss)
        
        # init evaluators for the evals
        self.Eval_train = evaluation.Evaluator(self.evals, eval_weights_train)
        self.Eval_val = evaluation.Evaluator(self.evals, eval_weights_val)
        
        if self.optimization == 'Backpropagation':
            self.Learner = learning.Backpropagation(     
                        W                       = self.W, 
                        size_layers             = self.size_layers,
                        epochs                  = self.epochs,
                        batchsize               = self.batchsize,
                        activation              = self._activation, 
                        activation_prime        = self._activation_prime, 
                        out_activation          = self._out_activation, 
                        out_activation_prime    = self._out_activation_prime, 
                        Lambda                  = self.Lambda, 
                        dropout_frac            = self.dropout_frac,
                        denoise_frac            = self.denoise_frac,
                        weights                 = weights,
                        learning_rate           = self.learning_rate,
                        random_state            = self.random_state
                                                    )
        if self.optimization == 'iRPROP-':
            self.Learner = learning.iRPROPminus(     
                        W                       = self.W, 
                        size_layers             = self.size_layers,
                        epochs                  = self.epochs,
                        activation              = self._activation, 
                        activation_prime        = self._activation_prime, 
                        out_activation          = self._out_activation, 
                        out_activation_prime    = self._out_activation_prime, 
                        Lambda                  = self.Lambda, 
                        dropout_frac            = self.dropout_frac,
                        denoise_frac            = self.denoise_frac,
                        weights                 = weights,
                        random_state            = self.random_state
                                                    )
        if self.optimization == 'iRPROP+':
            self.Learner = learning.iRPROPplus(     
                        W                       = self.W, 
                        size_layers             = self.size_layers,
                        epochs                  = self.epochs,
                        activation              = self._activation, 
                        activation_prime        = self._activation_prime, 
                        out_activation          = self._out_activation, 
                        out_activation_prime    = self._out_activation_prime, 
                        Lambda                  = self.Lambda, 
                        dropout_frac            = self.dropout_frac,
                        denoise_frac            = self.denoise_frac,
                        weights                 = weights,
                        random_state            = self.random_state
                                                    )
        if 'scipy_minimize_' in self.optimization:
            self.Learner = learning.scipy_minimize(     
                        W                       = self.W, 
                        size_layers             = self.size_layers,
                        epochs                  = self.epochs,
                        activation              = self._activation, 
                        activation_prime        = self._activation_prime, 
                        out_activation          = self._out_activation, 
                        out_activation_prime    = self._out_activation_prime, 
                        Lambda                  = self.Lambda, 
                        dropout_frac            = self.dropout_frac,
                        denoise_frac            = self.denoise_frac,
                        weights                 = weights,
                        random_state            = self.random_state,
                        Loss_train              = self.Loss_train,
                        method                  = self.optimization.split("_")[-1]
                                                    )
        
        self._dynamic_plot(init=True)         
         
        # run optimization
        self.Learner.run(X, y, self._compute_costs_scores)   
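In the `scipy_minimize_` branch above, the SciPy method name is recovered from the tail of the `optimization` string via `split("_")[-1]`; a quick check (the concrete method name is an assumed example):

# How the scipy_minimize branch derives its method argument; the
# concrete method name is an assumed example.
optimization = 'scipy_minimize_L-BFGS-B'
print(optimization.split('_')[-1])  # L-BFGS-B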
Example 12
def main():
    args = arguments.parse()

    checkpoint = args.checkpoint if args.checkpoint else None

    model, params = get_network(args.arch,
                                args.n_attrs,
                                checkpoint=checkpoint,
                                base_frozen=args.freeze_base)

    criterion = get_criterion(loss_type=args.loss, args=args)

    optimizer = get_optimizer(params,
                              fc_lr=float(args.lr),
                              opt_type=args.optimizer_type,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)

    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=10,
                                          gamma=0.1,
                                          last_epoch=args.start_epoch - 1)
    if checkpoint:
        state = torch.load(checkpoint)
        model.load_state_dict(state["state_dict"])
        scheduler.load_state_dict(state['scheduler'])

    # Dataloader code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    val_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])

    logger.info("Setting up training data")
    train_loader = data.DataLoader(COCOAttributes(
        args.attributes,
        args.train_ann,
        train=True,
        split='train2014',
        transforms=train_transforms,
        dataset_root=args.dataset_root),
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.workers,
                                   pin_memory=True)

    logger.info("Setting up validation data")
    val_loader = data.DataLoader(COCOAttributes(
        args.attributes,
        args.val_ann,
        train=False,
        split='val2014',
        transforms=val_transforms,
        dataset_root=args.dataset_root),
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 pin_memory=True)

    best_prec1 = 0

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    logger.info("Beginning training...")

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()

        # train for one epoch
        trainer.train(train_loader, model, criterion, optimizer, epoch,
                      args.print_freq)

        # evaluate on validation set
        # trainer.validate(val_loader, model, criterion, epoch, args.print_freq)
        prec1 = 0

        # remember best prec@1 and save checkpoint
        best_prec1 = max(prec1, best_prec1)
        trainer.save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'loss': args.loss,
                'optimizer': args.optimizer_type,
                'state_dict': model.state_dict(),
                'scheduler': scheduler.state_dict(),
                'batch_size': args.batch_size,
                'best_prec1': best_prec1,
            }, args.save_dir,
            '{0}_{1}_checkpoint.pth.tar'.format(args.arch, args.loss).lower())

    logger.info('Finished Training')

    logger.info('Running evaluation')
    evaluator = evaluation.Evaluator(model,
                                     val_loader,
                                     batch_size=args.batch_size,
                                     name="{0}_{1}".format(
                                         args.arch, args.loss))
    with torch.no_grad():
        evaluator.evaluate()
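Example 12 drives the learning rate with `StepLR(step_size=10, gamma=0.1)` and passes `last_epoch=args.start_epoch - 1` so a resumed run continues the same decay schedule. A standalone sketch of that decay (the initial learning rate of 0.1 is an assumed value):

import torch

# Standalone check of the StepLR schedule used above; the initial
# learning rate is an assumed value.
opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.1)
for _ in range(25):
    opt.step()    # step the optimizer before the scheduler (PyTorch >= 1.1)
    sched.step()
print(opt.param_groups[0]['lr'])  # 0.001 (decayed at epochs 10 and 20)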