Example #1
 def gen_iterators(self):
     (X_train, y_train, test) = self.load_data()
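     # both iterators below are built from the training arrays; lshape gives the CHW image shape (3, 224, 224)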
     train = ArrayIterator(X_train, y_train, nclass=1000, lshape=(3, 224, 224))
     test = ArrayIterator(X_train, y_train, nclass=1000, lshape=(3, 224, 224))
     self._data_dict = {'train': train,
                        'valid': test}
     return self._data_dict
Example #2
    def gen_iterators(self):
        if self.filepath is None:
            self.load_data()

        data = pad_data(self.filepath,
                        vocab_size=self.vocab_size,
                        sentence_length=self.sentence_length)
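        # pad_data returns the padded train/test arrays plus the number of classes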
        (X_train, y_train), (X_test, y_test), nclass = data

        self._data_dict = {'nclass': nclass}
        self._data_dict['train'] = ArrayIterator(X_train, y_train, nclass=2)
        self._data_dict['test'] = ArrayIterator(X_test, y_test, nclass=2)
        return self._data_dict
Example #3
 def gen_iterators(self):
     train = ArrayIterator(self.train_x,
                           self.train_y,
                           lshape=self.shape,
                           make_onehot=False,
                           name='train')
     valid = ArrayIterator(self.valid_x,
                           self.valid_y,
                           lshape=self.shape,
                           make_onehot=False,
                           name='valid')
     self._data_dict = {'train': train, 'valid': valid}
     return self._data_dict
Example #4
 def gen_iterators(self):
     (X_train, y_train), (X_test, y_test), nclass = self.load_data()
     train = ArrayIterator(X_train,
                           y_train,
                           nclass=nclass,
                           lshape=(1, 28, 28),
                           name='train')
     val = ArrayIterator(X_test,
                         y_test,
                         nclass=nclass,
                         lshape=(1, 28, 28),
                         name='valid')
     self._data_dict = {'train': train, 'valid': val}
     return self._data_dict
Example #5
    def gen_iterators(self):
        datasets = self.load_data()

        (X_train, y_train), (X_test, y_test), nclass = datasets
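        # optionally pad the label space out to 16 classes, e.g. to give the output layer a backend-friendly size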
        if self.pad_classes:
            nclass = 16

        train = ArrayIterator(X_train,
                              y_train,
                              nclass=nclass,
                              lshape=(3, 32, 32),
                              name='train')
        test = ArrayIterator(X_test,
                             y_test,
                             nclass=nclass,
                             lshape=(3, 32, 32),
                             name='valid')
        self._data_dict = {'train': train, 'valid': test}
        return self._data_dict
Example #6
  def run(self):
    global camera, autonomous_override
    open_camera()
    
    while True:
      if not autonomous:
        debug_print("Exiting autonomous thread")
        close_camera()
        turn_off_motors()
        break

      if autonomous_override:
        time.sleep(0)
        continue

      # Grab a still frame
      stream = picamera.array.PiRGBArray(camera)
      camera.capture(stream, 'rgb', use_video_port=False)

      debug_print("Grabbed a still frame")
      debug_start_timing()
      image = Image.fromarray(stream.array)
      image = image.resize(size, Image.ANTIALIAS)
      if (debug):
        image.save(my_dir + "train/debug/capture" + str(self.cnt) + ".png", "PNG")
        self.cnt = self.cnt + 1
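      # swap to BGR channel order and lay the pixels out as float32 CHW to match lshape=(3, H, W)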
      r, g, b = image.split()
      image = Image.merge("RGB", (b, g, r))
      image = np.asarray(image, dtype=np.float32)
      image = np.transpose(image, (2, 0, 1))
      x_new = image.reshape(1, 3*W*H) - 127

      if autonomous_override:
        time.sleep(0)
        continue

      # Run neural network
      inference_set = ArrayIterator(x_new, None, nclass=nclasses, lshape=(3, H, W))
      out = model.get_outputs(inference_set)
      debug_stop_timing()
      decision = out.argmax()
      debug_print(class_names[decision])

      if not autonomous_override:
        drive(decision)
Example #7
def test_recognition(test_file_name):
    # Load image
    image = Image.open(test_file_name)
    image.show()
    print("Loaded " + test_file_name)

    # Convert image to sample
    image = image.resize(size, Image.ANTIALIAS)
    r, g, b = image.split()
    image = Image.merge("RGB", (b, g, r))
    image = np.asarray(image, dtype=np.float32)
    image = np.transpose(image, (2, 0, 1))
    x_new = image.reshape(1, L) - 127

    # Run neural network
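    # y=None builds an inference-only iterator; get_outputs returns the model's output for each row of x_new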
    inference_set = ArrayIterator(x_new,
                                  None,
                                  nclass=nclasses,
                                  lshape=(3, H, W))
    out = model.get_outputs(inference_set)
    print "Recognized as " + class_names[out.argmax()]
Example #8
    def run(self):
        while True:
            if not autonomous:
                debug_print("Exiting autonomous thread")
                break

            # Grab a still frame
            stream = picamera.array.PiRGBArray(camera)
            camera.capture(stream, 'rgb', use_video_port=True)

            start_time = time.time()
            debug_print("Grabbed a still frame")
            image = Image.fromarray(stream.array)
            image = image.resize(size, Image.ANTIALIAS)
            if (debug):
                image.save(
                    my_dir + "train/debug/capture" + str(self.cnt) + ".png",
                    "PNG")
                self.cnt = self.cnt + 1
            r, g, b = image.split()
            image = Image.merge("RGB", (b, g, r))
            image = np.asarray(image, dtype=np.float32)
            image = np.transpose(image, (2, 0, 1))
            x_new = image.reshape(1, 3 * W * H) - 127

            # Run neural network
            inference_set = ArrayIterator(x_new,
                                          None,
                                          nclass=nclasses,
                                          lshape=(3, H, W))
            out = model.get_outputs(inference_set)
            debug_print("--- %s seconds per decision --- " %
                        (time.time() - start_time))
            decision = out.argmax()
            debug_print(class_names[decision])
            send_cmd(decision)
Example #9
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# make dataset
path = load_imdb(path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = pad_data(
    path, vocab_size=vocab_size, sentence_length=sentence_length)

print "Vocab size - ", vocab_size
print "Sentence Length - ", sentence_length
print "# of train sentences", X_train.shape[0]
print "# of test sentence", X_test.shape[0]

train_set = ArrayIterator(X_train, y_train, nclass=2)
valid_set = ArrayIterator(X_test, y_test, nclass=2)

# weight initialization
uni = Uniform(low=-0.1 / embedding_dim, high=0.1 / embedding_dim)
g_uni = GlorotUniform()

if args.rlayer_type == 'lstm':
    rlayer = LSTM(hidden_size,
                  g_uni,
                  activation=Tanh(),
                  gate_activation=Logistic(),
                  reset_cells=True)
elif args.rlayer_type == 'bilstm':
    rlayer = DeepBiLSTM(hidden_size,
                        g_uni,
Example #10
    def __init__(self,
                 X_dict,
                 time_steps,
                 forward=1,
                 return_sequences=True,
                 randomize=False):
        """
        Implements loading of given data into backend tensor objects. If the backend is specific
        to an accelerator device, the data is copied over to that device.

        Args:
            X_dict (dict of ndarray): Input sequences keyed by 'spikes' and 'stim'; each array
                                      has shape (num examples, feature size).
            time_steps (int): The number of examples to be put into one sequence.
            forward (int, optional): How many steps forward the sequence should predict. Defaults
                                     to 1, i.e. the next example.
            return_sequences (boolean, optional): Whether the target is a sequence or a single
                                                  step. Also determines whether the data is
                                                  formatted as strides or rolling windows: if
                                                  True, the target is a sequence and the input
                                                  is reshaped as strides; if False, the target
                                                  is a single step and the input is a rolling
                                                  window.
        """
        def rolling_window(spikes, stim, lag):
            """
            Convert the spike and stimulus arrays into time-lagged vectors.

            spikes, stim : (n, p) arrays
            lag          : time steps used for prediction

            returns a dict of (n - lag + 1, lag, p) strided views

            (Building time-lagged vectors is not necessary for neon.)
            """
            assert spikes.shape[0] > lag
            assert stim.shape[0] > lag

            spikes_shape = [spikes.shape[0] - lag + 1, lag, spikes.shape[-1]]
            stim_shape = [stim.shape[0] - lag + 1, lag, stim.shape[-1]]
            spikes_strides = [
                spikes.strides[0], spikes.strides[0], spikes.strides[-1]
            ]
            stim_strides = [stim.strides[0], stim.strides[0], stim.strides[-1]]

            spikes_out = np.lib.stride_tricks.as_strided(
                spikes, shape=spikes_shape, strides=spikes_strides)
            stim_out = np.lib.stride_tricks.as_strided(stim,
                                                       shape=stim_shape,
                                                       strides=stim_strides)
            return {'spikes': spikes_out, 'stim': stim_out}

        self.seq_length = time_steps
        self.forward = forward
        self.batch_index = 0
        self.nfeatures = self.nclass = X_dict['spikes'].shape[1]
        self.nsamples = X_dict['spikes'].shape[0]
        self.shape = (self.nfeatures, time_steps)
        self.return_sequences = return_sequences
        self.mean = X_dict['spikes'].mean(axis=0)

        target_steps = time_steps if return_sequences else 1
        # pre-allocate the device buffer to provide data for each minibatch
        # buffer size is nfeatures x (times * batch_size), which is handled by
        # backend.iobuf()
        self.X_dev = self.be.iobuf((self.nfeatures, time_steps))
        self.y_dev = self.be.iobuf((self.nfeatures, target_steps))

        if return_sequences is True:
            # truncate to make the data fit into multiples of batches
            extra_examples = self.nsamples % (self.be.bsz * time_steps)
            if extra_examples:
                X_dict['spikes'] = X_dict['spikes'][:-extra_examples]
                X_dict['stim'] = X_dict['stim'][:-extra_examples]

            # calculate how many batches
            self.nsamples -= extra_examples
            self.nbatches = self.nsamples // (self.be.bsz * time_steps)
            self.ndata = self.nbatches * self.be.bsz * time_steps  # no leftovers

            # y is the lagged version of the spike data
            spikes = X_dict['spikes']
            y = np.concatenate((spikes[forward:], spikes[:forward]))
            self.y_series = y
            # reshape this way so the sequence is continuous along the batches
            self.X = spikes.reshape(self.be.bsz, self.nbatches, time_steps,
                                    self.nfeatures)
            self.y = y.reshape(self.be.bsz, self.nbatches, time_steps,
                               self.nfeatures)
        else:
            X_dict['lag'] = time_steps
            self.X = rolling_window(**X_dict)
            self.X['spikes'] = self.X['spikes'][:-1]
            self.X['stim'] = self.X['stim'][:-1]
            self.y = X_dict['spikes'][time_steps:]

            self.nsamples = self.X['spikes'].shape[0]
            extra_examples = self.nsamples % (self.be.bsz)
            if extra_examples:
                self.X['spikes'] = self.X['spikes'][:-extra_examples]
                self.X['stim'] = self.X['stim'][:-extra_examples]
                self.y = self.y[:-extra_examples]

            # calculate how many batches
            self.nsamples -= extra_examples
            self.nbatches = self.nsamples // self.be.bsz
            self.ndata = self.nbatches * self.be.bsz
            self.y_series = self.y
            self.spike_series = self.X['spikes']
            self.stim_series = self.X['stim']

            Xshape = (self.nbatches, self.be.bsz, time_steps, self.nfeatures)
            Yshape = (self.nbatches, self.be.bsz, 1, self.nfeatures)
            #self.X = self.X.reshape(Xshape).transpose(1, 0, 2, 3)
            #self.y = self.y.reshape(Yshape).transpose(1, 0, 2, 3)

            # __init__ cannot return a value, so keep the prepared iterator on the instance
            # (the attribute name here is only illustrative)
            self.array_iter = ArrayIterator(X=[self.spike_series, self.stim_series],
                                            y=self.y_series,
                                            make_onehot=False)
Example #11
    print("Loaded " + test_file_name)
    image.show()
    image = image.resize(size, Image.ANTIALIAS)
    r, g, b = image.split()
    image = Image.merge("RGB", (b, g, r))
    image = np.asarray(image, dtype=np.float32)
    image = np.transpose(image, (2, 0, 1))
    return image.reshape(1, L) - 127


x_new[0] = load_sample(image_dir + "forward.jpg")
x_new[1] = load_sample(image_dir + "right.jpg")
x_new[2] = load_sample(image_dir + "left.jpg")
x_new[3] = load_sample(image_dir + "backward.jpg")

# Run neural network
inference_set = ArrayIterator(x_new, None, nclass=nclasses, lshape=(3, H, W))
out = mlp.get_outputs(inference_set)
# print out

print(class_names[out[0].argmax()])
print(class_names[out[1].argmax()])
print(class_names[out[2].argmax()])
print(class_names[out[3].argmax()])

# Sanity check 2
out = mlp.get_outputs(test)
print "Validation set result:"
print(out.argmax(1))
print "Compare the above to ground truth in dora/train/neon/val_file.csv.gz"
Example #12
    def benchmark(self):
        for d in self.devices:
            b = d if (self.backends is None) or (
                "mkl" not in self.backends) else "mkl"
            print("Use {} as backend.".format(b))

            # Common suffix
            suffix = "neon_{}_{}_{}by{}_{}".format(b, self.dataset,
                                                   self.resize_size[0],
                                                   self.resize_size[1],
                                                   self.preprocessing)

            # Set up backend
            # backend: 'cpu' for single cpu, 'mkl' for cpu using mkl library, and 'gpu' for gpu
            be = gen_backend(backend=b,
                             batch_size=self.batch_size,
                             rng_seed=542,
                             datatype=np.float32)

            # Prepare training/validation/testing sets
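            # each image is flattened and scaled to [0, 1]; make_onehot converts integer labels to one-hot vectors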
            neon_train_set = ArrayIterator(
                X=np.asarray([t.flatten().astype('float32') / 255
                              for t in self.x_train]),
                y=np.asarray(self.y_train),
                make_onehot=True,
                nclass=self.class_num,
                lshape=(3, self.resize_size[0], self.resize_size[1]))
            neon_valid_set = ArrayIterator(
                X=np.asarray([t.flatten().astype('float32') / 255
                              for t in self.x_valid]),
                y=np.asarray(self.y_valid),
                make_onehot=True,
                nclass=self.class_num,
                lshape=(3, self.resize_size[0], self.resize_size[1]))
            neon_test_set = ArrayIterator(
                X=np.asarray([t.flatten().astype('float32') / 255
                              for t in self.testImages]),
                y=np.asarray(self.testLabels),
                make_onehot=True,
                nclass=self.class_num,
                lshape=(3, self.resize_size[0], self.resize_size[1]))

            # Initialize model object
            self.neon_model = SelfModel(layers=self.constructCNN())

            # Costs
            neon_cost = GeneralizedCost(costfunc=CrossEntropyMulti())

            # Model summary
            self.neon_model.initialize(neon_train_set, neon_cost)
            print(self.neon_model)

            # Learning rules
            neon_optimizer = SGD(0.01,
                                 momentum_coef=0.9,
                                 schedule=ExpSchedule(0.2))
            # neon_optimizer = RMSProp(learning_rate=0.0001, decay_rate=0.95)

            # # Benchmark for 20 minibatches
            # d[b] = self.neon_model.benchmark(neon_train_set, cost=neon_cost, optimizer=neon_optimizer)

            # Reset model
            # self.neon_model = None
            # self.neon_model = Model(layers=layers)
            # self.neon_model.initialize(neon_train_set, neon_cost)

            # Callbacks: validate on validation set
            callbacks = Callbacks(
                self.neon_model,
                eval_set=neon_valid_set,
                metric=Misclassification(3),
                output_file="./saved_data/{}/{}/callback_data_{}.h5".format(
                    self.network_type, d, suffix))
            callbacks.add_callback(
                SelfCallback(eval_set=neon_valid_set,
                             test_set=neon_test_set,
                             epoch_freq=1))

            # Fit
            start = time.time()
            self.neon_model.fit(neon_train_set,
                                optimizer=neon_optimizer,
                                num_epochs=self.epoch_num,
                                cost=neon_cost,
                                callbacks=callbacks)
            print("Neon training finishes in {:.2f} seconds.".format(
                time.time() - start))

            # Result
            # results = self.neon_model.get_outputs(neon_valid_set)

            # Print error on validation set
            start = time.time()
            neon_error_mis = self.neon_model.eval(
                neon_valid_set, metric=Misclassification()) * 100
            print(
                'Misclassification error = {:.1f}%. Finished in {:.2f} seconds.'
                .format(neon_error_mis[0],
                        time.time() - start))

            # start = time.time()
            # neon_error_top3 = self.neon_model.eval(neon_valid_set, metric=TopKMisclassification(3))*100
            # print('Top 3 Misclassification error = {:.1f}%. Finished in {:.2f} seconds.'.format(neon_error_top3[2], time.time() - start))

            # start = time.time()
            # neon_error_top5 = self.neon_model.eval(neon_valid_set, metric=TopKMisclassification(5))*100
            # print('Top 5 Misclassification error = {:.1f}%. Finished in {:.2f} seconds.'.format(neon_error_top5[2], time.time() - start))

            self.neon_model.save_params("./saved_models/{}/{}/{}.prm".format(
                self.network_type, d, suffix))

            # Print error on test set
            start = time.time()
            neon_error_mis_t = self.neon_model.eval(
                neon_test_set, metric=Misclassification()) * 100
            print(
                'Misclassification error = {:.1f}% on test set. Finished in {:.2f} seconds.'
                .format(neon_error_mis_t[0],
                        time.time() - start))

            # start = time.time()
            # neon_error_top3_t = self.neon_model.eval(neon_test_set, metric=TopKMisclassification(3))*100
            # print('Top 3 Misclassification error = {:.1f}% on test set. Finished in {:.2f} seconds.'.format(neon_error_top3_t[2], time.time() - start))

            # start = time.time()
            # neon_error_top5_t = self.neon_model.eval(neon_test_set, metric=TopKMisclassification(5))*100
            # print('Top 5 Misclassification error = {:.1f}% on test set. Finished in {:.2f} seconds.'.format(neon_error_top5_t[2], time.time() - start))

            cleanup_backend()
            self.neon_model = None
Example #13
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=0.9,
                                                    random_state=42)
print(X_train.shape, 'X train shape')
print(y_train.shape, 'y train shape')

parser = NeonArgparser(__doc__)
parser.add_argument('--kbatch',
                    type=int,
                    default=1,
                    help='number of data batches per noise batch in training')
args = parser.parse_args()

gen_backend(backend='cpu', batch_size=10)
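# lshape describes each flattened example as a single-channel 25x25x25 volume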
train_set = ArrayIterator(X=X_train,
                          y=y_train,
                          nclass=2,
                          lshape=(1, 25, 25, 25))
valid_set = ArrayIterator(X=X_test, y=y_test, nclass=2)

init = Gaussian(scale=0.01)

# discriminator using convolutional layers
lrelu = Rectlin(slope=0.1)  # leaky relu for discriminator
# sigmoid = Logistic() # sigmoid activation function
Example #14
print(np.max(X), 'max element')
print(np.min(X), 'min element')
# X -= mean
print(X.shape, 'X shape')
#print(np.max(X),'max element')
#print(np.min(X),'min element')
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=0.9,
                                                    random_state=42)
print(X_train.shape, 'X train shape')
print(y_train.shape, 'y train shape')

gen_backend(backend='gpu', batch_size=100)
train_set = ArrayIterator(X=X_train,
                          y=y_train,
                          nclass=2,
                          lshape=(1, 25, 25, 25))
valid_set = ArrayIterator(X=X_test, y=y_test, nclass=2)

# plt.plot(X_train[0, 12])
# plt.savefig('data_img.png')

# setup weight initialization function
init = Gaussian(scale=0.01)

# discriminator using convolutional layers
lrelu = Rectlin(slope=0.1)  # leaky relu for discriminator
# sigmoid = Logistic() # sigmoid activation function
conv1 = dict(init=init, batch_norm=False, activation=lrelu, bias=init)
conv2 = dict(init=init,
             batch_norm=False,
Example #15
    filename = "splice_data_CAGT.csv"

    names = ['class', 'C', 'A', 'G', 'T']

    # Number of classes EI, IE and N
    nclass = 3

    # 20% of data used for validation
    validation_size = 0.20

    train_data, valid_data, train_label, valid_label = gene.load_data(
        ip_file_path, filename, names, validation_size=validation_size)

    train_iter = ArrayIterator(train_data,
                               train_label,
                               nclass=nclass,
                               lshape=(1, 4),
                               name='train')
    val_iter = ArrayIterator(valid_data,
                             valid_label,
                             nclass=nclass,
                             lshape=(1, 4),
                             name='valid')

    # weight
    w = Xavier()

    # bias
    b = Constant()

    # setup model layers