Example #1
    parser.add_argument('--exp', nargs='+', default=[], help="Names of the experiments")


    args = parser.parse_args()
    lexperiments = args.exp

    if not args.batch:
        # 'e150514', 'e120503', 'e110616', 'e150707', 'e151126', 'e120511'
        lexperiments = ['e150514']

    for exp in lexperiments:

        datainfo = experiments[exp]

        for sensor in datainfo.sensors:
            print(sensor)
            for dfile in [datainfo.datafiles[0]]:
                print(dfile)

                f = datainfo.open_experiment_data(mode='r')
                data = datainfo.get_peaks_resample_PCA(f, dfile, sensor)

                for d in data:
                    mn = np.mean(d[0:20])  # mean of the first 20 samples (currently unused)
                    print(d[0:20])
                    plotListSignals([d])

                datainfo.close_experiment_data(f)
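
plotListSignals is not defined in the snippet; it comes from the project's own plotting utilities. As a rough idea of what such a helper might do (the name and the ncols parameter are taken from the calls in these examples; everything else is an assumption), here is a minimal matplotlib sketch:

    import matplotlib.pyplot as plt

    def plotListSignals(signals, ncols=1):
        # Hypothetical stand-in: draw each 1-D signal on its own subplot.
        nrows = (len(signals) + ncols - 1) // ncols
        fig, axes = plt.subplots(nrows, ncols, squeeze=False)
        for k, signal in enumerate(signals):
            axes[k // ncols][k % ncols].plot(signal)
        plt.show()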


Example #2
            datamat = datainfo.get_raw_data(f, dfile)

            for i in range(len(vecsync)):
                saverage = np.zeros((len(lsensors), (wlen*2)))
                scounts = np.zeros(len(lsensors))
                for syn in vecsync[i]:
                    stime = time_sync_sensor(syn, nsensor)

                    for j in range(len(lsensors)):
                        saverage[j] += datamat[stime-wlen:stime+wlen,j]

                    # for s in syn:
                    #     saverage[s[0]] += datamat[stime-wlen:stime+wlen,s[0]]
                    #     scounts[s[0]] += 1

                for j in range(len(lsensors)):  # j avoids shadowing the outer group index i
                    saverage[j] /= len(vecsync[i])  # normalize by the number of sync events in group i

                # for i in range(len(lsensors)):
                #     saverage[i] /= scounts[i]

                plotListSignals(saverage, ncols=2)

            datainfo.close_experiment_data(f)



            # for s in lsynchs_pruned:
            #     print(s)
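
The loop above computes a synchronization-triggered average: for every sync event, a window of wlen samples on each side of the event time is cut from every sensor column, and the windows are averaged per sensor. A self-contained numpy sketch of the same idea on synthetic data (all names here are illustrative, not the project's API):

    import numpy as np

    def sync_triggered_average(datamat, sync_times, wlen):
        # Accumulate a +/-wlen window around each sync time, per sensor column.
        nsensors = datamat.shape[1]
        saverage = np.zeros((nsensors, 2 * wlen))
        for stime in sync_times:
            for j in range(nsensors):
                saverage[j] += datamat[stime - wlen:stime + wlen, j]
        return saverage / len(sync_times)

    datamat = np.random.randn(1000, 3)  # 3 sensors, 1000 samples
    print(sync_triggered_average(datamat, [200, 600], wlen=50).shape)  # (3, 100)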

Example #3
    # construct the stacked denoising autoencoder class
    sda = SdA(
        numpy_rng=numpy_rng,
        n_ins=data.shape[1],
        hidden_layers_sizes=[1000, 500, 20, data.shape[1]],
        n_outs=10
    )

    pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size)
    print('... pre-training the model')
    start_time = timeit.default_timer()
    ## Pre-train layer-wise
    corruption_levels = [.1, .1, .1, .1]
    for i in range(sda.n_layers):
        # go through pretraining epochs
        for epoch in range(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in range(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                         corruption=corruption_levels[i],
                         lr=pretrain_lr))
            print('Pre-training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(c)))

    end_time = timeit.default_timer()

    print(sda.dA_layers[-1].W.get_value(borrow=True).shape)
    plotListSignals(sda.dA_layers[-1].W.get_value(borrow=True), ncols=5)
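
The corruption_levels passed to the pre-training functions control what fraction of each input the corresponding denoising autoencoder sees zeroed out. In the Theano deep-learning-tutorial dA this is done with a binomial mask over the inputs; a numpy sketch of that corruption step (a sketch of the idea, not the tutorial's exact code):

    import numpy as np

    rng = np.random.default_rng(0)

    def corrupt(x, corruption_level):
        # Keep each input with probability 1 - corruption_level, zero the rest.
        mask = rng.binomial(n=1, p=1.0 - corruption_level, size=x.shape)
        return x * mask

    x = np.ones((4, 10))
    print(corrupt(x, 0.1))  # roughly 10% of entries zeroed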
Example #4
    cost, updates = da.get_cost_updates(
        corruption_level=0.1,
        learning_rate=learning_rate
    )

    train_da = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size]
        }
    )

    start_time = timeit.default_timer()


    # go through training epochs
    for epoch in range(training_epochs):
        # go through the training set
        c = []
        for batch_index in range(n_train_batches):
            c.append(train_da(batch_index))

        print('Training epoch %d, cost %f' % (epoch, numpy.mean(c)))

    end_time = timeit.default_timer()

    print(da.W.get_value(borrow=True).shape)
    plotListSignals(da.W.get_value(borrow=True).T, ncols=5)
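
get_cost_updates comes from the tutorial's dA class: it corrupts the input, encodes and decodes it through tied sigmoid layers, and returns the reconstruction cost together with gradient-descent updates. A compact numpy sketch of that forward pass and cost (the structure follows the tutorial; this simplified version assumes inputs in [0, 1] and omits the gradient step):

    import numpy as np

    def sigmoid(a):
        return 1.0 / (1.0 + np.exp(-a))

    def da_cost(x, W, bhid, bvis, corruption_level, rng):
        # Corrupt, encode, decode with tied weights, then cross-entropy cost.
        tilde_x = x * rng.binomial(n=1, p=1.0 - corruption_level, size=x.shape)
        y = sigmoid(tilde_x @ W + bhid)   # hidden code
        z = sigmoid(y @ W.T + bvis)       # reconstruction (tied weights)
        eps = 1e-9
        return -np.mean(np.sum(x * np.log(z + eps)
                               + (1 - x) * np.log(1 - z + eps), axis=1))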
Example #5
    da = dA(numpy_rng=rng,
            theano_rng=theano_rng,
            input=x,
            n_visible=data.shape[1],
            n_hidden=20)

    cost, updates = da.get_cost_updates(corruption_level=0.1,
                                        learning_rate=learning_rate)

    train_da = theano.function(
        [index],
        cost,
        updates=updates,
        givens={x: train_set_x[index * batch_size:(index + 1) * batch_size]})

    start_time = timeit.default_timer()

    # go through training epochs
    for epoch in range(training_epochs):
        # go through the training set
        c = []
        for batch_index in range(n_train_batches):
            c.append(train_da(batch_index))

        print('Training epoch %d, cost %f' % (epoch, numpy.mean(c)))

    end_time = timeit.default_timer()

    print(da.W.get_value(borrow=True).shape)
    plotListSignals(da.W.get_value(borrow=True).T, ncols=5)
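
In each training snippet, end_time is recorded but never used. A script following the Theano tutorial would typically report the elapsed time afterwards, along these lines (an assumed follow-up, not shown in the originals):

    print('Training took %.2f minutes' % ((end_time - start_time) / 60.0))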