def transfer_classifier_params(args):
    """Port trained classifier weights from a PyTorch model to a Chainer model.

    Loads dill-serialized weights into a TorchClassifier, mirrors every
    conv / batch-norm layer into a fresh Chainer Classifier, optionally
    runs both models on a sample image as a sanity check, and saves the
    resulting Chainer model.
    """
    torch_net = TorchClassifier()
    with open(args.torch_classifier_model_path, 'rb') as f:
        raw_weights = dill.load(f)
    params = {name: torch.FloatTensor(value)
              for name, value in raw_weights[0].items()}
    torch_net.load_state_dict(params)
    for name in params.keys():
        print('key: ', name)

    chainer_net = Classifier()

    # (index into the torch Sequential 'LeNet', corresponding chainer link)
    conv_layers = ((0, chainer_net.conv1),
                   (2, chainer_net.conv2),
                   (5, chainer_net.conv3),
                   (8, chainer_net.conv4),
                   (11, chainer_net.conv5))
    bn_layers = ((3, chainer_net.batch_norm1),
                 (6, chainer_net.batch_norm2),
                 (9, chainer_net.batch_norm3))
    for index, link in conv_layers:
        copy_conv_weight(torch_net.LeNet[index], link)
    for index, link in bn_layers:
        copy_batch_norm_weight(torch_net.LeNet[index], link)

    if args.sample_image:
        subtractor = Subtractor()
        load_model(args.chainer_subtractor_model_path, subtractor)
        gray_image = convert_to_grayscale(subtractor, args.sample_image).data
        blank_image = np.zeros(shape=gray_image.shape, dtype=np.float32)
        print('image1 shape: ', gray_image.shape)
        print('image2 shape: ', blank_image.shape)

        classify_image_with_pytorch_model(torch_net, torch.Tensor(gray_image), torch.Tensor(blank_image))
        classify_image_with_chainer_model(chainer_net, chainer.Variable(gray_image), chainer.Variable(blank_image))

    save_model(args.chainer_classifier_save_path, chainer_net)
def main():
    """Evaluate saved prediction-network snapshots over a range of iterations.

    Loads each snapshot 'model_iter-<N>' under --model-dir, evaluates it on
    the test dataset, and appends '<iteration>\t<loss>' to --output-file.
    Missing snapshots are skipped with a message.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--lstm',
                        action='store_true',
                        help='evaluate lstm network')
    parser.add_argument('--unroll-steps', type=int, default=20)
    parser.add_argument('--init-steps', type=int, default=11)
    parser.add_argument('--mean-image-file', type=str, default='')
    parser.add_argument('--model-dir', type=str, default='')
    parser.add_argument('--dataset-dir', type=str, default='')
    parser.add_argument('--skip-frames', type=int, default=4)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--k-step', type=int, default=1)
    parser.add_argument('--iteration-num', type=int, default=1000000)
    parser.add_argument('--dataset-num', type=int, default=1)
    # Generalized: the first snapshot iteration and the snapshot interval
    # were hard-coded (830000 / 10000); defaults keep the old behavior.
    parser.add_argument('--start-iteration', type=int, default=830000)
    parser.add_argument('--snapshot-interval', type=int, default=10000)
    parser.add_argument('--output-file',
                        type=str,
                        default='evaluation_results')
    args = parser.parse_args()

    chainer.backends.cuda.set_max_workspace_size(10000000000)
    chainer.config.autotune = True

    dataset_files = [
        join_path(args.dataset_dir, 'test{}.pickle'.format(index))
        for index in range(args.dataset_num)
    ]
    dataset = ds.prepare_dataset(dataset_files)
    mean_image = ds.load_mean_image(args.mean_image_file)

    iterator = prepare_iterator(dataset, mean_image, args)

    losses = []
    if args.lstm:
        model = LstmPredictionNetwork()
    else:
        model = FeedForwardPredictionNetwork()
    if args.gpu >= 0:
        model.to_gpu()

    with open(args.output_file, "w") as f:
        f.write('iteration\tloss\n')
        for iteration in range(args.start_iteration, args.iteration_num + 1,
                               args.snapshot_interval):
            model_file = join_path(args.model_dir,
                                   'model_iter-{}'.format(iteration))
            print('loading model: {}'.format(model_file))
            if files.file_exists(model_file):
                serializers.load_model(model_file, model)
                loss = evaluate_model(model, iterator, args)
                losses.append(dict(iteration=iteration, loss=loss))
                f.write('{}\t{}\n'.format(iteration, loss.data))
            else:
                print('Model {} not found. skipping evaluation.'.format(
                    model_file))

    print('losses: {}'.format(losses))
# Example #3
def load_params(naf, args):
    """Restore trained Q-network parameters into the NAF agent, if provided.

    Raises:
        ValueError: if args.q_params is set but the file does not exist.
    """
    print('loading model params')

    q_network = naf._q
    q_network.to_cpu()
    if args.q_params:
        if not files.file_exists(args.q_params):
            raise ValueError('Not trained parameter specified!!')
        serializers.load_model(args.q_params, q_network)
    if args.gpu >= 0:
        q_network.to_gpu()
# Example #4
def prepare_model(args, num_actions):
    """Build the PPO model for the configured environment type and load its weights.

    Args:
        args: parsed arguments providing env_type, model_params and
            (for atari) atari_model_size.
        num_actions: size of the environment's action space.

    Returns:
        The model with parameters loaded from args.model_params.

    Raises:
        NotImplementedError: if args.env_type is neither 'mujoco' nor 'atari'.
    """
    env_type = args.env_type
    if env_type == 'mujoco':
        model = PPOMujocoModel(num_actions)
    elif env_type == 'atari':
        model = PPOAtariModel(num_actions, args.atari_model_size)
    else:
        # Bug fix: the original built the exception but never raised it,
        # so 'model' stayed unbound and the next line crashed with
        # UnboundLocalError. Also fixes the 'ent_type' typo in the message.
        raise NotImplementedError("Unknown env_type: {}".format(env_type))
    serializers.load_model(args.model_params, model)
    return model
# Example #5
def main():
    """Visualization entry point: show dataset frames, predictions, or the mean image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-file', type=str, default='')
    parser.add_argument('--mean-image-file',
                        type=str,
                        default='mean_image.pickle')
    parser.add_argument('--dataset-file', type=str, default='100000.pickle')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--skip-frames', type=int, default=4)
    parser.add_argument('--initial-frame', type=int, default=0)
    parser.add_argument('--last-frame', type=int, default=10000)
    parser.add_argument('--init-steps', type=int, default=11)
    parser.add_argument('--show-dataset', action='store_true')
    parser.add_argument('--show-prediction', action='store_true')
    parser.add_argument('--show-mean-image', action='store_true')
    parser.add_argument('--show-sample-frame', action='store_true')
    parser.add_argument('--lstm', action='store_true')

    args = parser.parse_args()

    if args.show_dataset:
        animate_dataset(load_dataset([args.dataset_file]), args)

    if args.show_prediction:
        dataset = load_dataset([args.dataset_file])
        model = LstmPredictionNetwork() if args.lstm \
            else FeedForwardPredictionNetwork()
        serializers.load_model(args.model_file, model)
        if args.gpu >= 0:
            model.to_gpu()

        mean_image = ds.load_mean_image(args.mean_image_file)
        animate_predictions(model, dataset, mean_image, args)

    if args.show_mean_image:
        mean_image = ds.load_mean_image(args.mean_image_file)
        viewer.show_image(converter.chw2hwc(mean_image), title='mean image')

    if args.show_sample_frame:
        frames = load_dataset([args.dataset_file])['frames']
        viewer.show_image(converter.chw2hwc(frames[args.initial_frame]),
                          title='frame')
# Example #6
def load_params(td3, args):
    """Restore the trained Q1, Q2, and policy networks of a TD3 agent."""
    print('loading model params')

    targets = ((args.q1_params, td3._q1),
               (args.q2_params, td3._q2),
               (args.pi_params, td3._pi))
    for path, network in targets:
        network.to_cpu()
        serializers.load_model(path, network)
    if args.gpu >= 0:
        for _, network in targets:
            network.to_gpu()
# Example #7
def main():
    """Train a prediction network (feed-forward or LSTM) on the recorded dataset.

    Loads the mean image, optionally resumes from --model-file, builds the
    train/test iterators from --dataset-dir, and runs training. Training
    errors are reported instead of crashing the script.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset-dir', type=str, default='dataset')
    parser.add_argument('--mean-image-file',
                        type=str,
                        default='mean_image.pickle')
    parser.add_argument('--token',
                        type=str,
                        default=None,
                        help='Slack client token')
    parser.add_argument('--max-iterations', type=int, default=1500000)
    parser.add_argument('--test-interval', type=int, default=10000)
    parser.add_argument('--color-channels', type=int, default=3)
    parser.add_argument('--skip-frames', type=int, default=4)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--k-step', type=int, default=1)
    parser.add_argument('--num-actions', type=int, default=3)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--unroll-steps', type=int, default=20)
    parser.add_argument('--init-steps', type=int, default=11)
    parser.add_argument('--train-file-num', type=int, default=10)
    parser.add_argument('--test-file-num', type=int, default=1)
    parser.add_argument('--verify-batch',
                        action='store_true',
                        help='show created batch')
    parser.add_argument('--lstm',
                        action='store_true',
                        help='train lstm network')
    parser.add_argument('--learning-rate', type=float, default=1e-4)
    parser.add_argument('--model-file', type=str, default='')
    parser.add_argument('--snapshot-file', type=str, default='')

    args = parser.parse_args()

    print('loading mean image')
    mean_image = ds.load_mean_image(
        join_path(args.dataset_dir, args.mean_image_file))
    assert mean_image.shape == (args.color_channels, IMAGE_HEIGHT, IMAGE_WIDTH)

    if args.lstm:
        model = LstmPredictionNetwork()
    else:
        model = FeedForwardPredictionNetwork()

    if files.file_exists(args.model_file):
        print('loading model from: {}'.format(args.model_file))
        serializers.load_model(args.model_file, model)

    if args.gpu >= 0:
        model.to_gpu()

    train_data_files = [
        join_path(args.dataset_dir, 'train{}.pickle'.format(index))
        for index in range(args.train_file_num)
    ]
    test_data_files = [
        join_path(args.dataset_dir, 'test{}.pickle'.format(index))
        for index in range(args.test_file_num)
    ]

    train_iterator, test_iterator = prepare_iterator(train_data_files,
                                                     test_data_files,
                                                     mean_image, args)

    try:
        train_model(model, train_iterator, test_iterator, args=args)
    except Exception as error:
        # Bug fix: the bare 'except:' also caught KeyboardInterrupt/SystemExit
        # and discarded the failure reason. Keep the best-effort behavior but
        # report what went wrong.
        print('training finished with exception: {}'.format(error))
# Example #8
def prepare_subtractor(args):
    """Build a Subtractor and load its trained weights from args.subtractor_model."""
    network = Subtractor()
    serializers.load_model(args.subtractor_model, network)
    return network
# Example #9
def prepare_classifier(args):
    """Build a Classifier and load its trained weights.

    Raises:
        FileNotFoundError: if args.classifier_parameter does not exist.
    """
    # 'assert' is stripped under 'python -O', silently skipping the
    # existence check, so validate the path explicitly instead.
    if not files.file_exists(args.classifier_parameter):
        raise FileNotFoundError(args.classifier_parameter)
    classifier = Classifier()
    serializers.load_model(args.classifier_parameter, classifier)
    return classifier
# Example #10
def prepare_subtractor(args):
    """Build a Subtractor and load its trained weights.

    Raises:
        FileNotFoundError: if args.subtractor_parameter does not exist.
    """
    # 'assert' is stripped under 'python -O', silently skipping the
    # existence check, so validate the path explicitly instead.
    if not files.file_exists(args.subtractor_parameter):
        raise FileNotFoundError(args.subtractor_parameter)
    subtractor = Subtractor()
    serializers.load_model(args.subtractor_parameter, subtractor)
    return subtractor
# Example #11
def prepare_transition(args):
    """Build a Transition model and load its trained weights.

    Raises:
        FileNotFoundError: if args.transition_parameter does not exist.
    """
    # 'assert' is stripped under 'python -O', silently skipping the
    # existence check, so validate the path explicitly instead.
    if not files.file_exists(args.transition_parameter):
        raise FileNotFoundError(args.transition_parameter)
    transition = Transition()
    serializers.load_model(args.transition_parameter, transition)
    return transition
# Example #12
def prepare_posterior(args):
    """Build a Posterior model and load its trained weights.

    Raises:
        FileNotFoundError: if args.posterior_parameter does not exist.
    """
    # 'assert' is stripped under 'python -O', silently skipping the
    # existence check, so validate the path explicitly instead.
    if not files.file_exists(args.posterior_parameter):
        raise FileNotFoundError(args.posterior_parameter)
    posterior = Posterior()
    serializers.load_model(args.posterior_parameter, posterior)
    return posterior
# Example #13
def prepare_generator(args):
    """Build a Generator model and load its trained weights.

    Raises:
        FileNotFoundError: if args.generator_parameter does not exist.
    """
    # 'assert' is stripped under 'python -O', silently skipping the
    # existence check, so validate the path explicitly instead.
    if not files.file_exists(args.generator_parameter):
        raise FileNotFoundError(args.generator_parameter)
    generator = Generator()
    serializers.load_model(args.generator_parameter, generator)
    return generator
# Example #14
def load_optimizer(filepath, optimizer):
    """Restore optimizer state from *filepath* into *optimizer*.

    NOTE(review): delegates to serializers.load_model, which presumably
    wraps a generic deserializer that accepts optimizers as well as
    models — confirm against its definition.
    """
    serializers.load_model(filepath, optimizer)