Example 1
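An MNIST digit-classifier training loop in PyTorch: it trains a CNN with Adam and a StepLR schedule, validates after every epoch, and checkpoints the weights whenever the validation loss improves.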
def train(args):
    train_data = Mnist_Data("./dataset/train.csv", "train")
    val_data = Mnist_Data("./dataset/train.csv", "val")
    train_loader = DataLoader(train_data, batch_size=100, shuffle=True)

    model = CNNModel()
    model.to(device)

    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)

    scheduler = StepLR(optimizer, step_size=20, gamma=0.1)

    epochs = 30  # (int) number of full passes over the training set
    best_val_acc = None
    best_val_loss = None
    best_epoch = 0  # initialized so the summary print below cannot hit an undefined name
    with torch.no_grad():
        best_val_acc, best_val_loss = evaluate(model, val_data, criterion)
    print('Best Validation Accuracy : {}'.format(best_val_acc))
    print('Best Validation Loss : {}'.format(best_val_loss))

    for epoch in range(epochs):
        train_loss = 0.0
        model.train()
        for i, (images, labels) in enumerate(tqdm(train_loader)):
            # view(-1, ...) handles a final batch smaller than 100;
            # torch.autograd.Variable is a no-op since PyTorch 0.4, so it is dropped
            images = images.view(-1, 1, 28, 28).to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            output = model(images)
            loss = criterion(output, labels)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
            if i % 500 == 0:
                print('Epoch {}/{} : Step {}/{}, Loss: {:.4f}'.format(
                    epoch + 1, epochs, i + 1, len(train_loader), loss.item()))
        # advance the LR schedule once per epoch; the scheduler was created but never stepped
        scheduler.step()
        with torch.no_grad():
            validation_acc, val_loss = evaluate(model, val_data, criterion)
        model.train()
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_val_acc = validation_acc
            best_epoch = epoch
            torch.save(
                model.state_dict(),
                "saved_models/best_epoch_digit_recognizer_mnist_data_pytorch1.6_lr_1e-2_aug_{}.pth"
                .format(epoch + 1))
        # torch.save(model.state_dict(), "weights_epoch_{}.pth".format(epoch + 1))
        print('Best Validation Loss : {}'.format(best_val_loss))
        print('Best Validation Accuracy : {}'.format(best_val_acc))
        print('Best Epoch: {}'.format(best_epoch + 1))
        print(
            'Epoch {}/{} Done | Train Loss : {:.4f} | Validation Loss : {:.4f} | Validation Accuracy : {:.4f}'
            .format(epoch + 1, epochs, train_loss / len(train_loader), val_loss,
                    validation_acc))
    return best_val_loss
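Example 1 never shows the evaluate() helper it calls. A minimal sketch consistent with its call sites (returning (accuracy, mean loss) and reusing the snippet's DataLoader, device, and model) might look like the following; the body is an assumption, not the original implementation:

def evaluate(model, val_data, criterion):
    # hypothetical reconstruction: the signature and the (accuracy, loss)
    # return order are inferred from the call sites above
    model.eval()
    loader = DataLoader(val_data, batch_size=100, shuffle=False)
    total_loss, correct, total = 0.0, 0, 0
    for images, labels in loader:
        images = images.view(-1, 1, 28, 28).to(device)
        labels = labels.to(device)
        output = model(images)
        total_loss += criterion(output, labels).item()
        correct += (output.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
    return correct / total, total_loss / len(loader)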
Example 2
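An OpenVINO POT test helper: it optimizes a model in simplified mode, asserts that the optimized IR (.xml and .bin) was written, and evaluates accuracy metrics on a 1000-sample subset.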
def launch_simplified_mode(tmp_path, models, engine_config):
    model_name, model_framework, algorithm, preset, _ = SIMPLIFIED_TEST_MODELS[0]
    algorithm_config = make_algo_config(algorithm, preset)

    model = models.get(model_name, model_framework, tmp_path)
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    _ = optimize(config)

    output_dir = os.path.join(config.model.exec_log_dir, 'optimized')
    model = os.path.join(output_dir, config.model.model_name + '.xml')
    weights = os.path.join(output_dir, config.model.model_name + '.bin')

    assert os.path.exists(model)
    assert os.path.exists(weights)

    paths = [{
        'model': model,
        'weights': weights
    }]

    config.engine = get_engine_config(model_name)
    metrics = evaluate(
        config=config, subset=range(1000), paths=paths)

    metrics = OrderedDict([(metric.name, np.mean(metric.evaluated_value))
                           for metric in metrics])

    for metric_name, metric_val in metrics.items():
        print('{}: {:.4f}'.format(metric_name, metric_val))

    return metrics
Example 3
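A train-then-predict pipeline entry point: it trains a model from a config, predicts on the resulting validation loader, and optionally evaluates the predictions.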
def main(
    config_path: Path,
    dataset_path: Path,
    predict_path: Path,
    input_path: Path,
    evaluate_path: Optional[Path] = None,
) -> None:
    """
    Main function responsible for prediction with the passed model.

    Arguments:
        Path config_path: Path to main config (of :class:`DefaultConfig` class)
        Path dataset_path: Path to dataset
        Path predict_path: Path to file with model predictions
        Path input_path: Path to file with input data
        Path evaluate_path: Path to evaluations
    """

    logger.info("Trainer")
    result = train(config_path=config_path, dataset_path=dataset_path)

    info = "Pipline required training_config.py with attribute save = True"
    assert len(vars(result)) != 1, info

    root = result.model_path.parent.parent

    logger.info("Predictor")
    predict(
        config_path=config_path,
        input_path=input_path,
        model_path=result.model_path,
        predict_path=root / predict_path,
        val_loader=result.val_loader,
    )

    if evaluate_path:
        logger.info("Evaluator")
        evaluate(
            config_path=config_path,
            input_path=input_path,
            predict_path=root / predict_path,
            evaluate_path=root / evaluate_path,
            val_loader=result.val_loader,
        )
Example 4
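A pytest case for POT's classification sample: it optimizes a model through the sample app, saves and checks the IR, evaluates a 1000-image ImageNet subset, and compares top-1 accuracy against the expected value.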
def test_sample_compression(_sample_params, tmp_path, models):
    model_name, model_framework, algorithm, preset, expected_accuracy, custom_mo_config = _sample_params

    # hack for sample imports because sample app works only from sample directory
    pot_dir = Path(__file__).parent.parent
    sys.path.append(str(pot_dir / 'sample'))
    # pylint: disable=C0415
    from openvino.tools.pot.api.samples.classification.classification_sample import optimize_model

    model = models.get(model_name,
                       model_framework,
                       tmp_path,
                       custom_mo_config=custom_mo_config)
    data_source, annotations = get_dataset_info('imagenet_1001_classes')

    args = Dict({
        'model': model.model_params.model,
        'dataset': data_source,
        'annotation_file': annotations['annotation_file']
    })

    model_, _ = optimize_model(args)

    paths = save_model(model_, tmp_path.as_posix(), model_name)
    model_xml = os.path.join(tmp_path.as_posix(), '{}.xml'.format(model_name))
    weights = os.path.join(tmp_path.as_posix(), '{}.bin'.format(model_name))

    assert os.path.exists(model_xml)
    assert os.path.exists(weights)

    algorithm_config = make_algo_config(algorithm, preset)
    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)
    config.engine = get_engine_config(model_name)

    metrics = evaluate(config=config, subset=range(1000), paths=paths)

    metrics = OrderedDict([(metric.name, np.mean(metric.evaluated_value))
                           for metric in metrics])

    for metric_name, metric_val in metrics.items():
        print('{}: {:.4f}'.format(metric_name, metric_val))
        if metric_name == 'accuracy@top1':
            assert {
                metric_name: metric_val
            } == pytest.approx(expected_accuracy, abs=0.006)
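Example 5
Runs a POT compression pipeline on a loaded model, saves the compressed IR, and returns the evaluated metrics together with the compressed model.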
def run_algo(model, model_name, algorithm_config, tmp_path, reference_name):
    engine_config = get_engine_config(model_name)
    config = merge_configs(model.model_params, engine_config, algorithm_config)

    model = load_model(model.model_params)
    data_loader = create_data_loader(engine_config, model)
    engine = create_engine(engine_config, data_loader=data_loader, metric=None)
    pipeline = create_pipeline(algorithm_config.algorithms, engine)

    with torch.backends.mkldnn.flags(enabled=False):
        model = pipeline.run(model)
    paths = save_model(model, tmp_path.as_posix(), reference_name)
    engine.set_model(model)
    metrics = evaluate(config=config, subset=range(1000), paths=paths)
    metrics = OrderedDict([(metric.name, np.mean(metric.evaluated_value))
                           for metric in metrics])

    return metrics, model
Example 6
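A full training session with logging, optional multi-GPU support, checkpoint resumption, periodic evaluation on the test set, and best-checkpoint tracking.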
def train(args, net):
    # Get DataLoader
    data_loader = make_dataloader(args)

    # Get Optimizer
    optimizer = make_optimizer(args, net)

    # Get Criterion
    criterion = Loss(args=args)

    # Get Timer
    timer = Chronometer()

    # Get Logger
    logger = Logger(args=args)
    logger.print_net(net)

    # Check for Multi GPU Support
    if torch.cuda.device_count() > 1 and args.mGPU:
        net = torch.nn.DataParallel(net)

    # Create a directory for training files
    if not os.path.exists(args.ckpt):
        os.mkdir(args.ckpt)

    start_epoch = args.start_epoch
    if args.resume:
        checkpoint = torch.load(args.resumed_ckpt)
        start_epoch = checkpoint['epoch']
        # restore the trained weights too; the original only restored the epoch counter
        target_net = net.module if isinstance(net, torch.nn.DataParallel) else net
        target_net.load_state_dict(checkpoint['net_state_dict'])

    best_accuracy = 0.0
    timer.set()
    for epoch in range(start_epoch, args.epochs):
        logger('Epoch: {}'.format(epoch + 1), prt=False)
        epoch_train_loss, is_best = 0.0, False

        with tqdm(total=len(data_loader),
                  ncols=0,
                  file=sys.stdout,
                  desc='Epoch: {}'.format(epoch + 1)) as pbar:

            for i, in_batch in enumerate(data_loader):
                optimizer.zero_grad()
                in_data, target = in_batch
                # Load to GPU
                if torch.cuda.is_available():
                    in_data, target = in_data.cuda(), target.cuda()
                # Forward Pass
                predicted = net(in_data)
                # Backward Pass
                loss = criterion(predicted, target)
                epoch_train_loss += loss.item()
                loss.backward()
                optimizer.step()

                # Update Progressbar
                if i % 50 == 49:
                    logger('[Train loss/batch: {0:.4f}]'.format(loss.item()),
                           prt=False)
                pbar.set_postfix(Loss=loss.item())
                pbar.update()

        epoch_train_loss /= len(data_loader)

        message = 'Average Training Loss : {0:.4f}'.format(epoch_train_loss)
        logger(message)

        # Check Performance of the trained Model on test set
        if epoch % args.evaluate_every_n_epoch == args.evaluate_every_n_epoch - 1:
            print('Network Evaluation...')
            net.eval()
            output = evaluate.evaluate(args, net)
            net.train()
            logger(output['message'])
            if output['accuracy'] > best_accuracy:
                best_accuracy = output['accuracy']
                is_best = True
            # save the checkpoint (and copy it to best_checkpoint when this epoch is the best so far)
            save_checkpoint(
                {
                    'epoch':
                    epoch + 1,
                    'net_state_dict':
                    net.module.state_dict() if args.mGPU else net.state_dict()
                },
                is_best,
                filename=os.path.join(args.ckpt, 'checkpoint.pth.tar'),
                best_filename=os.path.join(args.ckpt,
                                           'best_checkpoint.pth.tar'))

    timer.stop()
    message = 'Finished Training Session in {0} hours & {1} minutes, Best Accuracy Achieved: {2:.2f}\n'.format(
        int(timer.elapsed / 3600), int((timer.elapsed % 3600) / 60),
        best_accuracy)
    logger(message)
    logger.end()
Example 7
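Frame-prediction anomaly detection on the Avenue dataset: a generator predicts the next frame of each video clip, per-frame PSNR errors are recorded and pickled, and the result is scored with an AUC evaluation.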
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.gpus = args.gpus

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    model = APCModel(cfg)
    load_checkpoint(model, args.checkpoint, strict=True)
    generator = model.generator
    generator = MMDataParallel(generator, device_ids=[0])
    generator.eval()

    transform = GroupImageTransform(mean=cfg.img_norm_cfg['mean'],
                                    std=cfg.img_norm_cfg['std'],
                                    to_rgb=cfg.img_norm_cfg['to_rgb'])

    video_path = cfg.data.test.v_prefix
    video_list = os.listdir(video_path)
    video_list.sort()

    psnr_records = []
    for video in video_list:
        frame_list = os.listdir(os.path.join(video_path, video))
        frame_list.sort()
        size = len(frame_list)
        clip_length = cfg.data.test.time_steps + cfg.data.test.num_pred
        c, w, h = cfg.data.val.scale_size
        psnrs = np.empty(shape=(size, ), dtype=np.float32)
        for i in range(clip_length, size - 1):
            frame_clip = frame_list[i - clip_length:i]
            frame_arr = []
            for frame_path in frame_clip:
                frame_arr.append(
                    mmcv.imread(os.path.join(video_path, video, frame_path)))

            frames, img_shape, pad_shape, scale_factor, crop_quadruple = transform(
                frame_arr, (w, h), keep_ratio=False, div_255=False)
            frames = to_tensor(frames)
            # use a fresh name: reusing `size` here would shadow the video's frame count
            n_frames = len(frame_clip)
            frames = frames.reshape(c * n_frames, w, h).unsqueeze(0)
            g_t = frames[:, :c * cfg.data.test.time_steps, :, :]
            g_t_1 = frames[:, c * cfg.data.test.time_steps:, :, :]
            p_t_1 = generator(g_t)

            psnr = psnr_error(p_t_1, g_t_1.cuda())
            psnrs[i] = psnr
        psnrs[:clip_length] = psnrs[clip_length]
        psnr_records.append(psnrs)

    result_dict = {
        'dataset': 'avenue',
        'psnr': psnr_records,
        'flow': [],
        'names': [],
        'diff_mask': []
    }

    # TODO: specify the actual name of the checkpoint.
    pickle_path = '../result/Avenue.pkl'
    with open(pickle_path, 'wb') as writer:
        pickle.dump(result_dict, writer, pickle.HIGHEST_PROTOCOL)

    results = evaluate.evaluate('compute_auc', pickle_path)
    print(results)
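Example 8
Fragment of a seq2seq-style training loop: it trains for N_EPOCHS, logs train and validation loss with perplexity each epoch, and checkpoints whenever the validation loss improves. The enclosing function header is missing from the snippet, so a hypothetical signature is supplied below to make the fragment parse.
def run_training(model, train_iterator, valid_iterator):  # hypothetical signature inferred from the body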
    optimizer = optim.Adam(model.parameters())

    criterion = CRITERION

    best_valid_loss = float('inf')
    print("Starting to train model...")
    train_losses = []
    valid_losses = []
    epochs = []
    for epoch in range(N_EPOCHS):

        start_time = time.time()

        train_loss = train(model, train_iterator, optimizer, criterion, CLIP,
                           epoch)
        valid_loss = evaluate(model, valid_iterator, criterion)
        train_losses.append(train_loss)
        valid_losses.append(valid_loss)
        epochs.append(epoch)
        end_time = time.time()

        epoch_mins, epoch_secs = epoch_time(start_time, end_time)

        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), 'ckpts/ckpt_%d.pt' % epoch)

        long_string = (f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s\n'
                       f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}\n'
                       f'\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {math.exp(valid_loss):7.3f}\n')
        print(long_string)
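Example 9
A GUI-driven evolutionary algorithm: for a configured number of iterations it evaluates, selects, and mutates a population, then redraws the best solution and timing statistics on the map and label widgets.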
def evolutionary_algorithm(init_parameters):
    str_time = 'Total time: \n'
    str_function = 'Function: \n'
    with open("tools/parameters.json") as f:
        parameters = json.load(f)

    if 'number_of_iterations' in parameters['config1'].keys():
        number_of_iterations = parameters['config1']['number_of_iterations']
    else:
        number_of_iterations = parameters['default']['number_of_iterations']

    current_generation = initialise(init_parameters)

    best_solution = evaluate(current_generation, init_parameters, True,
                             True)[random.randint(0,
                                                  len(current_generation) - 1)]

    content = display(best_solution, init_parameters, 0)
    image_map.loadFromData(content[0])
    label_map.setPixmap(image_map)
    label_best_solution.setText(content[1])
    # label_energy.setText(content[2])
    label_map.repaint()
    label_best_solution.repaint()
    #label_energy.repaint()

    for i in range(number_of_iterations):
        print('------------------------------')
        print('LOOP : ', i + 1)
        current_generation = evaluate(current_generation, init_parameters,
                                      True)
        parent_generation = current_generation
        parent_generation = remove_penalties(parent_generation)
        current_generation = select(current_generation)
        print('len after select', len(current_generation))
        current_generation = mutate(current_generation, init_parameters)
        print('len after mutate', len(current_generation))
        current_generation = evaluate(current_generation, init_parameters,
                                      False)
        current_generation = select_next_generation(parent_generation,
                                                    current_generation)
        print('len after select for next generation', len(current_generation))
        print("END GENERATION: ", current_generation)
        current_generation = remove_penalties(current_generation)
        best_solution = evaluate(current_generation, init_parameters, True,
                                 True)[0]
        content = display(best_solution, init_parameters, i + 1)
        image_map.loadFromData(content[0])
        label_map.setPixmap(image_map)
        label_best_solution.setText(content[1])
        str_time += content[3]
        str_function += content[4]
        str_algo_time = (
            '\nIn an average run, with 10 iterations and a population size of 12, '
            'we get around 800 requests to gmaps; at 50 ms per request that sums to 40 seconds, '
            'plus around 5 seconds for mapbox requests, giving 45 seconds total (33%).\n'
            'Total algorithm time = 140 seconds, calculation time = 95 seconds (67% of the total).'
        )
        label_energy.setText(str_time + '\n' + str_function + '\n' +
                             str_algo_time)
        label_map.repaint()
        label_best_solution.repaint()
        label_energy.repaint()
Example 10
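An ensemble prediction entry point: it loads several trained models from experiment folders, checks that their configs agree on dtype and batch size, combines their predictions with two_classifiers, saves the result, and optionally evaluates it.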
def main(
    root_path: Path,
    experiments: str,
    model_paths: str,
    input_path: Path,
    predict_path: Path,
    config_path: Path,
    evaluate_path: Path,
) -> Path:
    """
    Main function responsible for prediction with the passed models.

    Arguments:
        Path root_path: Path to the root folder for the subprocess
        str experiments: Relative paths to experiments, separated by commas
        str model_paths: Subpaths to trained models, separated by commas
        Path input_path: Path to file with input data
        Path predict_path: Path to output directory
        Path config_path: Path to main config (of :class:`DefaultConfig` class)
        Path evaluate_path: Path to evaluations

    Returns:
        Path to the experiment root dir.
    """
    models_with_config = []

    data_type = None
    prediction_bs = None
    main_config = None

    zipped = zip(unpack_string(experiments), unpack_string(model_paths))

    for index, (experiment, model_path) in enumerate(zipped):
        config_paths = (root_path / experiment / config_path).iterdir()
        config = get_config(config_paths)

        if index == 0:
            main_config = config

        model = load_model(config, root_path / experiment / model_path)
        model.eval()

        models_with_config.append(dict(model=model, config=config))

        data_type = set_param(previous=data_type,
                              current=config.training.dtype,
                              name="dtype")

        prediction_bs = set_param(
            previous=prediction_bs,
            current=config.prediction.batch_size,
            name="batch_size",
        )

    _, val_loader, _ = DataLoader.get_loaders(input_path, config=main_config)

    model_preds = []

    for model_with_config in models_with_config:
        model = model_with_config["model"]
        config = model_with_config["config"]

        all_preds = torch.tensor([])
        with torch.no_grad():  # inference only, so skip autograd bookkeeping
            for x, _ in tqdm(val_loader, desc="Predictions"):
                predictions = model(x)
                processed_preds = pred_transform(
                    preds=predictions, postprocessors=config.postprocessors)

                all_preds = torch.cat([all_preds, processed_preds])

        model_preds.append(all_preds)

    predictions = two_classifiers(model_preds, loader=val_loader)
    save_prediction(
        predictions=predictions,
        output_path=root_path / predict_path,
    )

    if evaluate_path:
        logger.info("Evaluator")
        evaluate(
            config=main_config,
            input_path=input_path,
            predict_path=root_path / predict_path,
            evaluate_path=root_path / evaluate_path,
            val_loader=val_loader,
        )

    # the -> Path annotation and docstring promise the experiment root dir,
    # but the original body never returned; root_path is the closest match
    return root_path