Code Example #1
import time
import pickle

import numpy as np
from lasagne import layers

# NumEpochs, BatchSize, LearningRate, filename_model_ae,
# iterate_minibatches_ae and data_aug are assumed to be defined
# elsewhere at module level in the project.
def exc_train(train_func, X_train, network):
    print("Starting training...")
    print("Epoch\t\tIter\t\tLoss\t\tSpar\t\tTime")
    it_div = 100
    for epoch in range(NumEpochs):
        start_time = time.time()
        for it in range(it_div):
            # Iterate through mini batches
            total_loss = 0
            total_sparsity = 0
            n_batch = 0
            for batch in iterate_minibatches_ae(X_train[it::it_div],
                                                BatchSize,
                                                shuffle=True):
                batch = data_aug(batch)
                batch_target = np.reshape(batch, (batch.shape[0], -1))
                loss, mask = train_func(batch, batch_target)
                total_loss += loss
                total_sparsity += 100.0 * float(
                    np.count_nonzero(mask > 1e-6)) / mask.size
                n_batch += 1
            total_loss /= n_batch
            total_sparsity /= n_batch
            LearningRate.set_value(np.float32(0.99 * LearningRate.get_value()))

            print("{:d}\t\t{:d}\t\t{:.4f}\t\t{:.3f}\t\t{:.3f}".format(
                epoch, it, total_loss, total_sparsity,
                time.time() - start_time))
            start_time = time.time()

        # Save model parameters at the end of every epoch
        param_values = layers.get_all_param_values(network)
        with open(filename_model_ae.format(epoch), 'wb') as f:
            pickle.dump(param_values, f)
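The helper iterate_minibatches_ae used throughout these examples is not shown. Below is a minimal sketch of what such a generator might look like; the shuffling strategy and the dropping of the trailing incomplete batch are assumptions, inferred from how Code Example #2 writes fixed BatchSize-sized slices into its buffers:

import numpy as np

def iterate_minibatches_ae(inputs, batch_size, shuffle=False):
    # Yield successive mini batches from ``inputs``. Incomplete
    # trailing batches are dropped, matching how exc_test fills
    # fixed-size buffer slices of length BatchSize.
    indices = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(indices)
    for start in range(0, len(inputs) - batch_size + 1, batch_size):
        yield inputs[indices[start:start + batch_size]]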
Code Example #2
def exc_test(encode_decode_func, X_train):
    all_imag = np.zeros(shape=(10000, 3, PS, PS), dtype=np.float32)
    all_feat = np.zeros(shape=(10000, 100, PS // 4, PS // 4), dtype=np.float32)
    all_mask = np.zeros(shape=(10000, 1, PS // 4, PS // 4), dtype=np.float32)
    all_nucl = np.zeros(shape=(10000, 3, PS, PS), dtype=np.float32)
    all_glob = np.zeros(shape=(10000, 3, PS, PS), dtype=np.float32)
    all_outp = np.zeros(shape=(10000, 3, PS, PS), dtype=np.float32)
    buf_n = 0

    print("Starting testing...")
    # Iterate through mini batches
    for batch in iterate_minibatches_ae(X_train, BatchSize, shuffle=True):
        batch = data_aug(batch, deterministic=True)
        feat, mask, nucl, glob, outp = encode_decode_func(batch)
        all_imag[buf_n:buf_n + BatchSize, :, :, :] = batch
        all_feat[buf_n:buf_n + BatchSize, :, :, :] = feat
        all_mask[buf_n:buf_n + BatchSize, :, :, :] = mask
        all_nucl[buf_n:buf_n + BatchSize, :, :, :] = nucl
        all_glob[buf_n:buf_n + BatchSize, :, :, :] = glob
        all_outp[buf_n:buf_n + BatchSize, :, :, :] = outp.reshape(
            (BatchSize, 3, PS, PS))
        buf_n += BatchSize
    all_imag = all_imag[0:buf_n]
    all_feat = all_feat[0:buf_n]
    all_mask = all_mask[0:buf_n]
    all_nucl = all_nucl[0:buf_n]
    all_glob = all_glob[0:buf_n]
    all_outp = all_outp[0:buf_n]

    return all_imag, all_feat, all_mask, all_nucl, all_glob, all_outp
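exc_test returns six aligned arrays trimmed to the number of samples actually processed. The following is a hypothetical usage sketch; encode_decode_func is assumed to be a compiled Theano function mapping an input batch to the five network outputs, and the archive filename is illustrative:

import numpy as np

imag, feat, mask, nucl, glob, outp = exc_test(encode_decode_func, X_train)

# Persist all outputs in one compressed archive for later inspection
# (the filename is illustrative).
np.savez_compressed('ae_test_outputs.npz',
                    imag=imag, feat=feat, mask=mask,
                    nucl=nucl, glob=glob, outp=outp)

# Overall mask sparsity, using the same 1e-6 threshold as the
# training loops above.
print("mean mask sparsity: {:.2f}%".format(
    100.0 * np.count_nonzero(mask > 1e-6) / mask.size))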
Code Example #3
def exc_train(train_func, X_train, network):
    print("Starting training...");
    print("Epoch\t\tIter\t\tLoss\t\tSpar\t\tTime");
    it_div = 100;
    for epoch in range(NumEpochs):
        start_time = time.time();
        for it in range(it_div):
            # Iterate through mini batches
            total_loss = 0;
            n_batch = 0;
            for batch in iterate_minibatches_ae(X_train[it::it_div], BatchSize, shuffle=True):
                batch = data_aug(batch);
                batch_target = np.reshape(batch, (batch.shape[0], -1));
                loss = train_func(batch, batch_target);
                total_loss += loss;
                n_batch += 1;
            total_loss /= n_batch;
            LearningRate.set_value(np.float32(0.99*LearningRate.get_value()));

            print("{:d}\t\t{:d}\t\t{:.4f}\t\t{:.3f}".format(
                epoch, it, total_loss, time.time()-start_time));
            start_time = time.time();

            if it % 20 == 0:
                param_values = layers.get_all_param_values(network);
                pickle.dump(param_values, open(filename_model_ae.format(epoch), 'w'));
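The X_train[it::it_div] slice in these training variants partitions the training set into it_div interleaved subsets, one per inner iteration, so each epoch still visits every sample exactly once. A small worked illustration:

import numpy as np

X = np.arange(10)          # stand-in for X_train
it_div = 4                 # stand-in for the 100 used above

# Each inner iteration sees every it_div-th sample, starting at it:
for it in range(it_div):
    print(it, X[it::it_div])
# 0 [0 4 8]
# 1 [1 5 9]
# 2 [2 6]
# 3 [3 7]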
Code Example #4
def exc_train(train_func, X_train, network):
    print("Starting training...");
    print("Epoch\t\tIter\t\tLoss\t\tSpar\t\tTime");
    it_div = 100;
    BatchSize = 2;
    for epoch in range(NumEpochs):
        start_time = time.time();
        for it in range(it_div):
            # Iterate through mini batches
            total_loss = 0;
            total_sparsity = 0;
            n_batch = 0;
            for batch in iterate_minibatches_ae(X_train[it::it_div], BatchSize, shuffle=True):
                batch = data_aug(batch);
                batch_target = np.reshape(batch, (batch.shape[0], -1));
                loss, mask = train_func(batch, batch_target);
                total_loss += loss;
                total_sparsity += 100.0 * float(np.count_nonzero(mask>1e-6)) / mask.size;
                if n_batch % 20 == 0:
                    sample_sparsity = 100.0 * float(np.count_nonzero(mask[0, ...]>1e-5)) / mask[0, ...].size;
                    print("============{:.3f}============".format(sample_sparsity));
                n_batch += 1;
            total_loss /= n_batch;
            total_sparsity /= n_batch;
            LearningRate.set_value(np.float32(0.99*LearningRate.get_value()));

            print("{:d}\t\t{:d}\t\t{:.4f}\t\t{:.3f}\t\t{:.3f}".format(
                epoch, it, total_loss, total_sparsity, time.time()-start_time));
            start_time = time.time();

        if epoch % 1 == 0:
            param_values = layers.get_all_param_values(network);
            pickle.dump(param_values, open(filename_model_ae.format(epoch), 'w'));

            if it % 2 == 0:
                BatchSize += 1;
                if BatchSize > 32: BatchSize = 32;
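All four variants decay LearningRate by 1% after every inner iteration via set_value/get_value, which implies it is a Theano shared variable rather than a plain Python float. A minimal sketch of that setup; the initial value and the variable name are assumptions:

import numpy as np
import theano

# Assumed setup: a float32 shared variable, so that the compiled
# training function picks up the decayed value on every call.
LearningRate = theano.shared(np.float32(1e-3), name='learning_rate')

# One decay step, as performed after each inner iteration above:
LearningRate.set_value(np.float32(0.99 * LearningRate.get_value()))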