def main():
    args = arguments.args
    args.update(params)

    args['loss_func'] = F.cross_entropy

    torch.manual_seed(args['seed'])
    np.random.seed(args['seed'])

    model = net.MLP(params['x_dim'], params['width'], params['y_dim'])
    # model = net.Linear(params['x_dim'], params['y_dim'])

    # Note: the validation set here is drawn from the same training split
    train_data = ToyColoredMNIST(train=True)
    valid_data = ToyColoredMNIST(train=True)
    test_data = ToyColoredMNIST(train=False)

    train_loader = DataLoader(train_data,
                              batch_size=params['batch_size'],
                              shuffle=True)
    valid_loader = DataLoader(valid_data,
                              batch_size=params['batch_size'],
                              shuffle=True)
    test_loader = DataLoader(test_data,
                             batch_size=params['test_batch_size'],
                             shuffle=True)

    model = trainer(model, train_loader, valid_loader, test_loader, args)

    new_data = ToyColoredMNIST(train=True)
    get_loss_var_across_colors(args, model, new_data)
    return
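The net.MLP class itself is not shown on this page; a minimal PyTorch sketch consistent with the (x_dim, width, y_dim) constructor call above (the layer names and the ReLU hidden activation are assumptions) might be:

import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    """Two-layer perceptron: x_dim -> width -> y_dim."""
    def __init__(self, x_dim, width, y_dim):
        super().__init__()
        self.fc1 = nn.Linear(x_dim, width)
        self.fc2 = nn.Linear(width, y_dim)

    def forward(self, x):
        # Flatten all but the batch dimension, then apply the hidden layer
        x = x.view(x.size(0), -1)
        return self.fc2(F.relu(self.fc1(x)))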
Example #2
0
    def __init__(self, modelName='20160818_MNIST.model'):

        self.modelName = modelName

        self.window = self.create_window()

        # set canvas
        self.image1 = Image.new("RGB", (window_width, window_height),
                                (255, 255, 255))
        self.draw = ImageDraw.Draw(self.image1)

        # set neural network model
        self.mlp = net.MLP(784, 1000, 10)
        model = L.Classifier(self.mlp)
        serializers.load_hdf5(self.modelName, model)
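The snippet ends after loading the model; a hypothetical predict-from-canvas method, reusing the preprocessing seen in Example #4 below (the method name and the assumption that numpy is imported as np are mine), could look like:

    def predict_digit(self):
        # Downscale the canvas, convert to grayscale, and invert so that
        # strokes are near 1 and the background is 0, as in the training data
        img = self.image1.convert('L').resize((28, 28), Image.BILINEAR)
        x = 1 - np.asarray(img, dtype=np.float32).reshape(1, -1) / 255
        y = self.mlp(x)                  # forward pass through the MLP
        return int(np.argmax(y.data))    # most probable digit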
Example #3
0
def main():
    args = arguments.args
    args.update(params)

    args['loss_func'] = F.mse_loss

    model = net.MLP(params['x_dim'], params['width'], params['y_dim'])

    convex_data = ConvexDataset()
    train_loader = DataLoader(convex_data,
                              batch_size=params['batch_size'],
                              shuffle=False)

    trainer(model, train_loader, args)
    return
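The trainer helper invoked here (and, with extra loaders, in Example #1) is not shown; a minimal sketch matching this example's (model, train_loader, args) signature, assuming args also carries 'lr' and 'epochs' keys, might be:

import torch

def trainer(model, train_loader, args):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.get('lr', 1e-3))
    for epoch in range(args.get('epochs', 10)):
        for x, y in train_loader:
            optimizer.zero_grad()
            loss = args['loss_func'](model(x), y)  # e.g. F.mse_loss above
            loss.backward()
            optimizer.step()
    return model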
Example #4
0
def main(args):
    if args.model == 'cnn':
        net = net_module.CNN()
    else:
        net = net_module.MLP(28 * 28, 10, 100)
    gpu_device = args.gpu
    if gpu_device >= 0:
        chainer.cuda.get_device(gpu_device).use()
        net.to_gpu(gpu_device)
        xp = cuda.cupy
    else:
        xp = np
    serializers.load_npz(args.model_file, net)
    image = Image.open(args.image_file).convert('L').resize((28, 28),
                                                            Image.BILINEAR)
    # The training data values are in the range 0-1, so divide by 255 to match
    # The training data has a 0-valued background, so invert the image
    image = 1 - xp.asarray(image).astype(np.float32) / 255
    image = image.reshape((1, -1))
    probs = cuda.to_cpu(predict(net, image))[0]
    results = sorted(zip(six.moves.range(10), probs), key=lambda x: -x[1])
    for n, p in results:
        print('{0:d}: {1:.4f}'.format(n, p))
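The predict helper used above is not part of the snippet; a minimal Chainer sketch, assuming it returns class probabilities for a batch, might be:

import chainer
import chainer.functions as F

def predict(net, x):
    # Run the network in test mode without building a computation graph
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        y = net(x)
    return F.softmax(y).data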
Example #5
0
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--coeff-type',
                        default='all-one',
                        choices=('all-one', 'linear', 'harmonic'),
                        help='coefficient function used to weight neurons')
    parser.add_argument('--output',
                        default='models/model.pth',
                        help='output model file path')
    args = parser.parse_args()
    args.cuda = torch.cuda.is_available()

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    data = datasets.get_dataset(args.dataset_root, args.dataset,
                                args.batch_size, args.cuda)
    train_dataset, train_loader, test_dataset, test_loader = data
    x, _ = next(iter(train_loader))
    B, C, W, H = x.shape
    model = net.MLP(C * W * H, 10, coeff_type=args.coeff_type)

    if args.cuda:
        model = model.cuda()

    ps = filter(lambda x: x.requires_grad, model.parameters())  # not used below
    train_model(model, args.output, train_loader, test_loader, args.lr,
                args.epochs)
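How the --coeff-type choices are applied inside net.MLP is not visible here; purely as an illustration of the three schedules (the function name and scaling are guesses, not the project's API), they could be generated like this:

import torch

def make_coeffs(coeff_type, n):
    idx = torch.arange(1, n + 1, dtype=torch.float32)
    if coeff_type == 'all-one':
        return torch.ones(n)   # every neuron weighted equally
    if coeff_type == 'linear':
        return idx / n         # weight grows linearly with neuron index
    return 1.0 / idx           # 'harmonic': 1, 1/2, 1/3, ...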
Example #6
0
    parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU device index, -1 indicates CPU')
    parser.add_argument('--epoch', '-e', type=int, default=100, help='Number of epochs')
    parser.add_argument('--batch-size', '-b', type=int, default=100, help='Mini batch size')
    parser.add_argument('--prefix', '-p', type=str, default=None, help='prefix of saved file name')
    args = parser.parse_args()

    n_epoch = args.epoch
    batch_size = args.batch_size
    if args.prefix is None:
        prefix = args.model
    else:
        prefix = args.prefix
    if args.model == 'cnn':
        net = net_module.CNN()
    else:
        net = net_module.MLP(28 * 28, 10, 100)
    gpu_device = args.gpu
    if gpu_device >= 0:
        chainer.cuda.get_device(gpu_device).use()
        net.to_gpu(gpu_device)
        xp = cuda.cupy
    else:
        xp = np
    optimizer = optimizers.Adam()
    optimizer.setup(net)

    # Load the MNIST dataset
    # get_mnist downloads the MNIST dataset files if they are missing,
    # so the first run takes some time
    # The dataset is stored under "~/.chainer/dataset"
    train_data, test_data = chainer.datasets.get_mnist()
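The snippet stops after loading MNIST; a minimal continuation sketch using a manual Chainer loop with a SerialIterator and softmax cross-entropy (one common pattern, not necessarily what the original script does) might be:

import chainer
import chainer.functions as F
from chainer.dataset import concat_examples

train_iter = chainer.iterators.SerialIterator(train_data, batch_size)
while train_iter.epoch < n_epoch:
    batch = train_iter.next()
    x, t = concat_examples(batch, device=gpu_device)
    y = net(x)
    loss = F.softmax_cross_entropy(y, t)
    net.cleargrads()
    loss.backward()
    optimizer.update()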
Example #7
0
    def __init__(self, action_space, state_dim, gamma=1):
        self.logits_net = net.MLP(input_dim=state_dim,
                                  output_dim=action_space.__len__())
        self.optim = Adam(self.logits_net.parameters(), lr=0.01)
        self.action_space = action_space
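Only the constructor is shown for this policy class; a hypothetical action-selection method consistent with the fields above (the method name and the indexing of action_space are assumptions) could be:

    def get_action(self, state):
        # Sample an action index from the categorical policy defined by the logits
        logits = self.logits_net(torch.as_tensor(state, dtype=torch.float32))
        dist = torch.distributions.Categorical(logits=logits)
        idx = dist.sample()
        return self.action_space[idx.item()], dist.log_prob(idx)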
Example #8
0
    else:
        assert False, "Invalid choice for rejection sampling"


    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    train_set, train_loader, test_set, test_loader = datasets.get_randomref_dataset("", 
                                                                                    args.batch_size, 
                                                                                    {'train' : args.train_size,
                                                                                     'test' : args.test_size},
                                                                                    args.numreferencenodes, 
                                                                                    args.quantization, 
                                                                                    args.rejectionsampling,
                                                                                    args.noise_scale,
                                                                                    args.ref_scale)

    adhoc_model = net.MLP(2*args.numreferencenodes, 1, activation=True)
    models = {'adhoc_model' : adhoc_model}

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    out_str = "&".join(simulate_args_from_namespace(args, positional=['output']))
    out_str = args.output+out_str+".pth"  
    print(out_str)
    if args.cuda:
        for _, model in models.items():
            model = model.cuda()
    model_acc = train_model(models, out_str, train_loader, test_loader, args.lr, args.epochs, 
                            sisr_thres=args.sisr_thres)