Example #1
import argparse
import csv
import os
import shutil

import numpy as np
import numpy.random as npr
import setproctitle
import torch
import torch.optim as optim

# models, print_header, train and test are defined elsewhere in the project.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--boardSz', type=int, default=2)
    parser.add_argument('--batchSz', type=int, default=150)
    parser.add_argument('--testBatchSz', type=int, default=200)
    parser.add_argument('--nEpoch', type=int, default=100)
    parser.add_argument('--testPct', type=float, default=0.1)
    parser.add_argument('--save', type=str)
    parser.add_argument('--work', type=str, default='work')
    subparsers = parser.add_subparsers(dest='model')
    subparsers.required = True
    fcP = subparsers.add_parser('fc')
    fcP.add_argument('--nHidden', type=int, nargs='+', default=[100, 100])
    fcP.add_argument('--bn', action='store_true')
    convP = subparsers.add_parser('conv')
    convP.add_argument('--nHidden', type=int, default=50)
    convP.add_argument('--bn', action='store_true')
    spOptnetEqP = subparsers.add_parser('spOptnetEq')
    spOptnetEqP.add_argument('--Qpenalty', type=float, default=0.1)
    optnetEqP = subparsers.add_parser('optnetEq')
    optnetEqP.add_argument('--Qpenalty', type=float, default=0.1)
    optnetIneqP = subparsers.add_parser('optnetIneq')
    optnetIneqP.add_argument('--Qpenalty', type=float, default=0.1)
    optnetIneqP.add_argument('--nineq', type=int, default=100)
    optnetLatent = subparsers.add_parser('optnetLatent')
    optnetLatent.add_argument('--Qpenalty', type=float, default=0.1)
    optnetLatent.add_argument('--nLatent', type=int, default=100)
    optnetLatent.add_argument('--nineq', type=int, default=100)
    args = parser.parse_args()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    t = '{}.{}'.format(args.boardSz, args.model)
    if args.model == 'optnetEq' or args.model == 'spOptnetEq':
        t += '.Qpenalty={}'.format(args.Qpenalty)
    elif args.model == 'optnetIneq':
        t += '.Qpenalty={}'.format(args.Qpenalty)
        t += '.nineq={}'.format(args.nineq)
    elif args.model == 'optnetLatent':
        t += '.Qpenalty={}'.format(args.Qpenalty)
        t += '.nLatent={}'.format(args.nLatent)
        t += '.nineq={}'.format(args.nineq)
    elif args.model == 'fc':
        t += '.nHidden:{}'.format(','.join([str(x) for x in args.nHidden]))
        if args.bn:
            t += '.bn'
    if args.save is None:
        args.save = os.path.join(args.work, t)
    setproctitle.setproctitle('bamos.sudoku.' + t)

    with open('data/{}/features.pt'.format(args.boardSz), 'rb') as f:
        X = torch.load(f)
    with open('data/{}/labels.pt'.format(args.boardSz), 'rb') as f:
        Y = torch.load(f)

    N, nFeatures = X.size(0), int(np.prod(X.size()[1:]))

    nTrain = int(N * (1. - args.testPct))
    nTest = N - nTrain

    trainX = X[:nTrain]
    trainY = Y[:nTrain]
    testX = X[nTrain:]
    testY = Y[nTrain:]

    assert (nTrain % args.batchSz == 0)
    assert (nTest % args.testBatchSz == 0)

    save = args.save
    if os.path.isdir(save):
        shutil.rmtree(save)
    os.makedirs(save)

    npr.seed(1)

    print_header('Building model')
    if args.model == 'fc':
        nHidden = args.nHidden
        model = models.FC(nFeatures, nHidden, args.bn)
    elif args.model == 'conv':
        model = models.Conv(args.boardSz)
    elif args.model == 'optnetEq':
        model = models.OptNetEq(args.boardSz, args.Qpenalty, trueInit=False)
    elif args.model == 'spOptnetEq':
        model = models.SpOptNetEq(args.boardSz, args.Qpenalty, trueInit=False)
    elif args.model == 'optnetIneq':
        model = models.OptNetIneq(args.boardSz, args.Qpenalty, args.nineq)
    elif args.model == 'optnetLatent':
        model = models.OptNetLatent(args.boardSz, args.Qpenalty, args.nLatent,
                                    args.nineq)
    else:
        assert False

    if args.cuda:
        model = model.cuda()

    fields = ['epoch', 'loss', 'err']
    trainF = open(os.path.join(save, 'train.csv'), 'w')
    trainW = csv.writer(trainF)
    trainW.writerow(fields)
    trainF.flush()
    fields = ['epoch', 'loss', 'err']
    testF = open(os.path.join(save, 'test.csv'), 'w')
    testW = csv.writer(testF)
    testW.writerow(fields)
    testF.flush()

    if 'optnet' in args.model:
        # if args.tvInit: lr = 1e-4
        # elif args.learnD: lr = 1e-2
        # else: lr = 1e-3
        lr = 1e-1
    else:
        lr = 1e-3
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # writeParams(args, model, 'init')
    test(args, 0, model, testF, testW, testX, testY)
    for epoch in range(1, args.nEpoch + 1):
        # update_lr(optimizer, epoch)
        train(args, epoch, model, trainF, trainW, trainX, trainY, optimizer)
        test(args, epoch, model, testF, testW, testX, testY)
        torch.save(model, os.path.join(args.save, 'latest.pth'))
        # writeParams(args, model, 'latest')
        os.system('./plot.py "{}" &'.format(args.save))
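
The subparser setup above makes the model name a positional subcommand: shared flags such as --batchSz must appear before it, and model-specific flags such as --nHidden after it. A minimal, self-contained sketch of that calling convention (the flag names mirror the example; the values passed to parse_args and the script name in the comment are purely illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batchSz', type=int, default=150)
subparsers = parser.add_subparsers(dest='model')
subparsers.required = True
fcP = subparsers.add_parser('fc')
fcP.add_argument('--nHidden', type=int, nargs='+', default=[100, 100])

# CLI equivalent (hypothetical script name): main.py --batchSz 64 fc --nHidden 200 200
args = parser.parse_args(['--batchSz', '64', 'fc', '--nHidden', '200', '200'])
print(args.model, args.batchSz, args.nHidden)  # -> fc 64 [200, 200]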
Example #2
                    training=False,
                    verbose=True)
if (args.trun == "testing"):
    k = min(1000, np.shape(X_test)[0])
    X_test = X_test[:k, :, :, :]
    Y_test_c = Y_test_c[:k, :]
    Y_test = Y_test[:k]
    labels = run_nn(datagen_test,
                    X_test,
                    Y_test_c,
                    Y_test,
                    args.batch,
                    training=False,
                    verbose=True)
if (args.trun == "deconv"):
    forward_net = models.Conv(pretrained=True, deconv=True)
    backward_net = deconv_models.Conv(pretrained=True)
    im = normalize_input("./data/cats/cat1.jpg", sz)
    out = forward_net.predict([im])
    print(len(out))
    print(list(map(np.shape, out)))
    out = backward_net.predict(out)
    #im = preprocess_image(X_test[0, :, :, :])
    #plt.imshow(np.resize(im, np.shape(im)[1:]))
    #plt.show()
    #out = model.predict([im])
    #print(len(out))
    #print(list(map(np.shape, out)))
    #out = deconv_model.predict(out)
    process_fmap(out, im)
    raise ValueError
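
The helper normalize_input used in the "deconv" branch is not shown in this snippet. A hedged sketch of what such a helper might do, assuming the same VGG-style BGR mean subtraction seen in Example #3 (this is an assumption, not the project's actual implementation):

import cv2
import numpy as np

def normalize_input(path, sz):
    # Hypothetical implementation: load the image, resize to (sz, sz) and
    # subtract the per-channel BGR means used for VGG preprocessing.
    im = cv2.resize(cv2.imread(path), (sz, sz)).astype(np.float32)
    im -= np.array([103.939, 116.779, 123.68], dtype=np.float32)
    return im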
Example #3
    data = []
    for im_name in list_img:
        im = cv2.resize(cv2.imread(im_name), (sz, sz)).astype(np.float32)
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
        #im = im.transpose((2, 0, 1))
        data.append(im)
    data = np.array(data)

    ###############################################
    # Action 1) Get max activation for a specific selection of feat maps
    ###############################################
    get_max_act = True
    if not model:
        model = models.Conv(pretrained=True, deconv=True, sz=sz) 
        #model = load_model('./data/weights/vgg16_weights.h5')
        #model.compile(optimizer="sgd", loss='categorical_crossentropy')
        deconv_model = deconv_models.Conv(pretrained=True)
    out = model.predict(data)
    #print(model.summary())
    out = deconv_model.predict(out)
    raise ValueError
    if not Dec:
        Dec = KerasDeconv.DeconvNet(model)
    if get_max_act:
        layers = ['block1_conv1', 'block1_conv2', 'pool1', 'block2_conv1', 'pool2', 'block2_conv2', 'pool3']
        layer = layers[4]
        d_act_path = './data/dict_top9_mean_act.pickle'
        d_act = {layer: {},
                 }
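
The snippet ends while initializing d_act, a dictionary meant to hold "top-9 mean activation" statistics per layer. As a hedged sketch of the kind of computation that typically fills such a dictionary (an assumption about the omitted code, not the project's actual logic): average each feature map over its spatial dimensions and keep the indices of the 9 images that excite it most.

import numpy as np

def top9_by_mean_activation(feat_maps):
    # feat_maps: activations of one layer with shape (N, H, W, C).
    # Average over the spatial dimensions, then return the indices of the
    # 9 images with the highest mean activation for every map, shape (9, C).
    means = feat_maps.mean(axis=(1, 2))    # (N, C)
    return np.argsort(-means, axis=0)[:9]  # top-9 image indices per feature map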