Example #1
    print "Denominator: %2.10e nats" % np.log(den)
    print "Resulting P: %2.10e nats" % (num - np.log(den))
    end_time = time.clock()

    fn = os.path.join(cfg.basename, "info-0-%s.pickle" % cfg.idx)
    if os.path.exists(fn):
        with open(fn, "r") as f:
            stats = cPickle.load(f)
    else:
        stats = {}

    stats["weights2"] = np.sum(W ** 2)
    stats["nats"] = num - np.log(den)
    stats["nats_num"] = num
    stats["nats_den"] = np.log(den)
    stats["dev"] = cfg.device
    stats["basename"] = cfg.basename
    stats["bindex"] = cfg.idx
    stats["cpf_host"] = os.uname()[1]
    stats["cpf_duration"] = end_time - start_time
    stats["cpf_time"] = strftime("%Y-%m-%d %H:%M:%S")

    with open(fn, "wb") as f:
        cPickle.dump(stats, f)

    cp.exitCUDA()


if __name__ == "__main__":
    main()
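
A minimal follow-up sketch (not part of the original example): loading the stats pickle back for offline inspection. The file name "info-0-0.pickle" is a hypothetical stand-in for one built from cfg.basename and cfg.idx as above.

import cPickle

# "info-0-0.pickle" is a placeholder for a file written by the run above
with open("info-0-0.pickle", "rb") as f:
    stats = cPickle.load(f)

print "estimate: %2.10e nats" % stats["nats"]
print "host: %s, duration: %.2fs" % (stats["cpf_host"], stats["cpf_duration"])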
Example #2
def tearDownModule():
    cp.exitCUDA()
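
For context, a minimal sketch of where such a tearDownModule sits in a test module. It assumes cp is cuv_python and that initCUDA, dev_tensor_float, and fill are available in that binding; the test itself is hypothetical.

import unittest
import cuv_python as cp

def setUpModule():
    cp.initCUDA(0)   # assumed counterpart to exitCUDA: claim device 0 once

def tearDownModule():
    cp.exitCUDA()    # release the CUDA context after the last test ran

class TensorTest(unittest.TestCase):
    def test_fill(self):
        t = cp.dev_tensor_float([16, 16])  # hypothetical small device tensor
        cp.fill(t, 0)                      # exercises the GPU context

if __name__ == "__main__":
    unittest.main()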
Example #3
    pymlp = MLP(cfg, weights, biases)

    # run a test-set evaluation every 10th epoch; the short-circuit "and"
    # skips runMLP on all other epochs
    pymlp.preEpochHook = lambda mlp, epoch: epoch % 10 == 0 and mlp.runMLP(
        mbp_test, cfg.test_batchsize, epoch)
    try:
        pymlp.train(mbp, cfg.finetune_epochs, cfg.finetune_batch_size,
                    cfg.finetune_rprop)
    except KeyboardInterrupt:
        pass
    # re-allocate each layer's memory and persistent chains after fine-tuning
    for layer in rbmstack.layers:
        layer.alloc()
    for layer in rbmstack.layers:
        layer.allocPChain()
    rbmstack.saveAllLayers("-finetune")
    pymlp.saveLastLayer()

if cfg.headless:
    # no display available: release the CUDA context and stop before plotting
    cp.exitCUDA()
    sys.exit(0)
PLT_NUM = 1
# pyplot is imported only after the headless check above, so headless runs
# never pull in a display backend
import matplotlib.pyplot as plt

#### calculate maps_bottom into py. yeah it's a dirty hack, i know
px = cfg.px
py = cfg.py

if "projection_results" in rbmstack.__dict__:
    for layernum in rbmstack.projection_results:
        filters = rbmstack.projection_results[layernum].T
        print "Saving projections from layer %d (%d x %d)" % (
            layernum, filters.shape[0], filters.shape[1])
        img_name = make_img_name("filter_layer%d.png" % (layernum))
        visualize_rows(PLT_NUM,
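
visualize_rows is a project-specific helper; as a rough stand-in, here is a minimal matplotlib sketch that tiles each row of an (n_filters x px*py) matrix into a grid image, assuming px and py are the patch width and height from cfg.

import numpy as np
import matplotlib.pyplot as plt

def show_filter_grid(filters, px, py, img_name, cols=8):
    # tile each row (one filter) of an (n, px*py) array into a cols-wide grid
    n = filters.shape[0]
    rows = int(np.ceil(n / float(cols)))
    fig, axes = plt.subplots(rows, cols, squeeze=False, figsize=(cols, rows))
    for i, ax in enumerate(axes.ravel()):
        if i < n:
            ax.imshow(filters[i].reshape(py, px), cmap="gray")
        ax.axis("off")
    fig.savefig(img_name, dpi=100)
    plt.close(fig)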