Example #1
def _load_training_data(self):
    train_data = []
    train_labels = []
    _spinner = spinner(jump=300)
    for i in range(self.num_train_data):
        # generate one random bracket sequence and its label
        data, label = _create_bracket_data(self.sequence_length, self.num_bracket_types,
                                           self.num_noise_types, self.max_unmatched,
                                           self.normalised)
        train_data.append(data)
        train_labels.append(label)
        # report progress as a fraction of the requested dataset size
        percentage = float(i) / float(self.num_train_data)
        _spinner.print_spinner(percentage * 100)
    _spinner.print_spinner(100.0)
    self._training_data = np.array(train_data), np.array(train_labels)
    super()._save_training_data()
Example #2
def main(opts):
    # set up the OSC server and register the message handlers
    server = OSCServer((opts.ip_addres, int(opts.port)))
    server.handle_timeout = types.MethodType(handle_timeout, server)
    server.addMsgHandler("/user", user_callback)
    server.addMsgHandler("/quit", quit_callback)
    log_post('INFO: listening on %s:%s' % (opts.ip_addres, opts.port))
    spinner_ = spinner()
    # `run` is a module-level flag; the /quit handler presumably clears it
    while run:
        each_frame(server)

        # draw one spinner frame and erase it on the next pass
        sys.stdout.write(spinner_.next())
        sys.stdout.flush()
        sleep(0.5)
        sys.stdout.write('\b')

    server.close()
Example #3
def wait_for_available(image_id):
	sleep_time = 10
	total_time = 0
	image = vmcreate.conn.get_image(image_id)
	print("\n***** Waiting for image to become available *****")

	with utils.spinner():
		while image.state != 'available':
			time.sleep(sleep_time)
			total_time += sleep_time
			image = vmcreate.conn.get_image(image_id)
			if total_time > 1800:
				print("\nTimed out waiting for image to become available")
				return False

	return True
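
The helper used throughout these examples is not shown on this page. It is called in three ways: print_spinner(percentage) with an optional jump argument (Examples #1, #5, #7 and #9), an iterator-style next() whose frame the caller erases with '\b' (Examples #2 and #4), and a context manager via utils.spinner() (Examples #3 and #6). The class below is only a minimal sketch that would satisfy all three call patterns; the class name reuse, the threading, and the output details are assumptions, not code from the original projects.

import itertools
import sys
import threading


class spinner:
    """Hypothetical minimal spinner matching the call patterns in the examples."""

    def __init__(self, jump=1):
        # `jump` throttles print_spinner(): only every `jump`-th call redraws
        self._frames = itertools.cycle('|/-\\')
        self._jump = max(1, jump)
        self._calls = 0
        self._stop = None

    def next(self):
        # iterator-style use: the caller writes the frame and erases it with '\b'
        return next(self._frames)

    def print_spinner(self, percentage):
        # progress-style use: redraw "<frame> xx%" in place every `jump` calls
        self._calls += 1
        if self._calls % self._jump and percentage < 100.0:
            return
        sys.stdout.write('\r%s %3.0f%%' % (next(self._frames), percentage))
        if percentage >= 100.0:
            sys.stdout.write('\n')
        sys.stdout.flush()

    # context-manager use: spin in a background thread until the block exits
    def __enter__(self):
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._spin)
        self._thread.daemon = True
        self._thread.start()
        return self

    def _spin(self):
        while not self._stop.wait(0.1):
            sys.stdout.write(next(self._frames))
            sys.stdout.flush()
            sys.stdout.write('\b')

    def __exit__(self, exc_type, exc_value, traceback):
        self._stop.set()
        self._thread.join()
        return False

Examples #2 and #4 write the frame themselves, so next() only needs to return a single character; the context-manager variant does its own drawing in a background thread.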
Example #4
def main(opts):
    # connect to SuperCollider
    client = OSC.OSCClient()
    client.connect((opts.ip_addres, int(opts.port)))
    oscmsg = OSC.OSCMessage()
    log_post('INFO: connected client to %s:%s' % (
        opts.ip_addres, opts.port
        ))

    # setup GPIO
    pin = int(opts.pin_number)
    state = False
    try:
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

        state = GPIO.input(pin)
    except NameError as err:
        log_post('ERROR: %s' % err)

    log_post('INFO: waiting for state change...')
    spinner_ = spinner()
    try:
        while True:
            # read GPIO; fall back to False if GPIO is unavailable
            try:
                read_state = GPIO.input(pin)
            except NameError:
                read_state = False

            # fire an OSC message whenever the pin state flips
            if state != read_state:
                state = read_state
                send_osc(client, oscmsg)

            sys.stdout.write(spinner_.next())
            sys.stdout.flush()
            time.sleep(0.1)
            sys.stdout.write('\b')
    except KeyboardInterrupt:
        pass

    try:
        GPIO.cleanup()
    except NameError:
        pass
Example #5
def _load_with_correct_shape(self, data_type):
    raw_images, raw_labels = load_mnist(type(self).__name__, data_type)
    raw_images = raw_images / 255.0
    if self.shrink:
        # downsample each image with 2x2 max pooling
        new_images = []
        for image in raw_images:
            new_images.append(
                skimage.measure.block_reduce(image, (2, 2), np.max))
        raw_images = np.array(new_images)
    current_shape = raw_images.shape
    chain_length = current_shape[1] * current_shape[2]
    ones = np.ones(chain_length)
    raw_data = raw_images.reshape(current_shape[0], chain_length)
    _spinner = spinner(jump=300)
    data = []
    for element in raw_data:
        # pair every pixel with a constant 1 to form the two-component feature map
        new_element = np.column_stack((ones, element))
        data.append(new_element)
    data = np.array(data)
    print(data.shape)
    labels = convert_to_onehot(raw_labels)
    return (data, labels)
Example #6
	i += 1

print("\n***** Creating mount point %(mount_point)s *****" % locals())
utils.execute("mkdir -p %(mount_point)s" % locals())
mount_point_created = True

if fs.f_bfree <= fs.f_blocks * 2 / 3:
	if cloud:
		get_volume(disk_size_in_GBs * 2, instance, mount_point)
	else:
		print("Not enough space to bundle")
		exit(1)

if custom_kernel_path:
	print("\n***** Bundling kernel *****")
	with utils.spinner():
		utils.execute("euca-bundle-image -i %(custom_kernel_path)s -d %(mount_point)s --kernel true -p %(kernel_name)s" % locals())

	kernel_name += '.manifest.xml'

	print("\n***** Uploading kernel *****")
	with utils.spinner():
		utils.execute("euca-upload-bundle -b %(bucket_name)s -m %(mount_point)s/%(kernel_name)s" % locals())

	print("\n***** Registering kernel *****")
	kernel_id = utils.execute("euca-register %(bucket_name)s/%(kernel_name)s" % locals())[0].split()[1]

	if private:
		make_private(kernel_id)

if custom_ramdisk_path:
Example #7
    def _load_all_data(self):
        """
        _load_all_data is responsible for reading the .csv files downloaded in the initialisation.
        The results are saved into _all_data
        :return: nothing
        """
        _all_datapoints = []
        _all_labels = []
        counter = 0
        factor = 2 / self.data_length
        new_length = int(self.data_length / 2)
        ones = np.ones(new_length)
        _spinner = spinner()
        for i in range(10):

            #_spinner.print_spinner(0.0)
            percentage = int((i / 10) * 100)

            filename = self._uncompressed_data_path + "Participant_" + str(
                i + 1) + ".csv"
            with open(filename, 'r') as f:
                reader = csv.reader(f)
                header = next(reader)
                headings = next(reader)
                jump_index = 0
                index = 0
                prev_row_label = None
                row_label = 0
                data = []
                for index, row in enumerate(reader):

                    _spinner.print_spinner(percentage)

                    if index >= jump_index:
                        # every data_length rows, FFT the completed window and store it as one datapoint
                        if index != 0 and index % self.data_length == 0:
                            data = np.abs(np.fft.rfft(data, axis=0) *
                                          factor)[:-1]
                            data = np.column_stack((ones, data))
                            _all_datapoints.append(np.array(data))
                            _all_labels.append(row_label.value)
                            data = []
                            prev_row_label = None
                        data.append(
                            np.array([
                                np.float32(row[1]),
                                np.float32(row[2]),
                                np.float32(row[3])
                            ]))
                        row_label = activityLabels[row[-1]]
                        if prev_row_label is not None:
                            if row_label != prev_row_label:
                                modulo = index % self.data_length
                                jump_index = (modulo + 1) * self.data_length
                        prev_row_label = row_label
        _all_datapoints = np.array(_all_datapoints)
        _all_labels = convert_to_onehot(np.array(_all_labels))
        permutation = np.random.permutation(len(_all_datapoints))
        #_all_datapoints = _all_datapoints[permutation]
        #_all_labels = _all_labels[permutation]
        #_all_datapoints[:,:,1:] = np.tanh(_all_datapoints[:,:,1:])

        _spinner.print_spinner(100.0)

        print(_all_datapoints.shape)
        print(_all_labels.shape)
        print(_all_labels[0])
        self._all_data = (_all_datapoints, _all_labels)
        np.save(self._all_data_path, _all_datapoints)
        np.save(self._all_labels_path, _all_labels)
Example #8
def _preprocess_images(data, size, shrink=True):
    """
    This function preprocesses images into format from the paper
    Supervised learning with quantum-inspired tensor networks

    :param data: tensorflow dataset
        The tensorflow dataset we are reading from
    :param size: integer
        The size of the dataset we wish to extract
    :param shrink: boolean
        Whether the image is shrunk using average pooling or not.
        If true, then the image is shrunk to 14x14 before being flattened.
        If false, the image is not shrunk.
    :return: (numpy array, numpy array)
        Returns (data points, results) in the format
        ([batch, MPS input size, other dimensions], [batch, classifications])
    """

    # written this way because originally, this was the only function and would read directly.
    # TODO: change all references to "mnist" with data
    mnist = data

    sess = tf.Session()
    data = []
    labels = []

    # Tensorflow operators / placeholders to resize the data from MNIST to format from paper
    # Resize images from 28*28 to 14*14
    image = tf.placeholder(tf.float32, shape=[784])
    if shrink:
        reshaped_image = tf.reshape(image, [-1, 28, 28, 1])
        pool = tf.nn.avg_pool(reshaped_image,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
        #pooled_image = tf.placeholder(tf.float32, shape=[1, 14, 14, 1])
        snaked_image = tf.reshape(pool, shape=[196])

        ones = tf.ones([196], dtype=tf.float32)
    else:
        snaked_image = image
        ones = tf.ones([784], dtype=tf.float32)
    phi = tf.stack([ones, snaked_image], axis=1)

    _spinner = spinner(jump=300)

    # Loop through all the elements in the dataset and resize
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter("output", sess.graph)
        writer.close()
        counter = 0

        for i in range(20):
            percentage = int((i / 20) * 100)
            batch = mnist.next_batch(int(size / 20), shuffle=False)
            images = batch[0]
            for index, element in enumerate(images):
                #pooled = sess.run(pool,
                #                  feed_dict={reshaped_image: sess.run(reshaped_image,
                #                                                      feed_dict={image: element})})
                data.append(np.array(sess.run(phi, feed_dict={image:
                                                              element})))
                labels.append(np.array(batch[1][index]))
                _spinner.print_spinner(percentage)
    _spinner.print_spinner(100.0)
    return (np.array(data), np.array(labels))
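
Examples #5 and #8 build the same two-component feature map from "Supervised learning with quantum-inspired tensor networks": every (optionally pooled) pixel value x becomes a row [1, x]. For reference, a plain-NumPy sketch of that mapping without the TensorFlow graph is below; the 2x2 average pooling, the function name, and the input shapes are assumptions rather than code from the examples above.

import numpy as np


def preprocess_images_np(images, labels, shrink=True):
    """Hypothetical NumPy version of the [1, pixel] feature map used above.

    images: array of shape [batch, 784] with values in [0, 1]
    labels: one-hot array of shape [batch, num_classes]
    """
    data = []
    for image in images:
        pixels = image.reshape(28, 28)
        if shrink:
            # 2x2 average pooling: 28x28 -> 14x14, mirroring tf.nn.avg_pool above
            pixels = pixels.reshape(14, 2, 14, 2).mean(axis=(1, 3))
        flat = pixels.reshape(-1)
        # pair every pixel with a constant 1: shape [num_pixels, 2]
        phi = np.column_stack((np.ones_like(flat), flat))
        data.append(phi)
    return np.array(data), np.array(labels)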
Example #9
    def _load_all_data(self):
        _all_datapoints = []
        _all_labels = []
        counter = 0
        new_length = int(self.data_length / 2)
        result_length = self.result_length
        ones = np.ones(result_length)
        _spinner = spinner(200)
        counter = np.array([0, 0, 0, 0])
        csv_filename = self._uncompressed_data_path + "REFERENCE.csv"
        with open(csv_filename, 'r') as f:
            reader = csv.reader(f)
            for index, row in enumerate(reader):
                current_data = []
                current_loc = self.data_length
                percentage = int(100 * index / 8528.0)
                _spinner.print_spinner(percentage)
                label = cardioLabels.noisy
                if row[1] != "~":
                    label = cardioLabels[row[1]]
                sorted_indices = counter.argsort()
                # skip records whose class is already the most common so far
                if label.value != sorted_indices[-1]:
                    record = self._uncompressed_data_path + row[0] + ".mat"
                    mat_data = scipy.io.loadmat(record)
                    samples = mat_data["val"]
                    samples = samples.flatten()
                    len_left = len(samples)
                    data = samples[:self.data_length]
                    data = np.abs(np.fft.rfft(data))[5:result_length + 5]
                    factor = 1 / np.amax(data)
                    data = data * factor
                    data = np.column_stack((ones, data))
                    _all_labels.append(label.value)
                    _all_datapoints.append(data)
                    current_data.append(data)
                    counter[label.value] = counter[label.value] + 1
                    len_left -= self.data_length
                    # take extra windows from long recordings when the class is
                    # class 0 or the least common so far (never class 3)
                    while (len_left > self.data_length
                           and (label.value == 0
                                or label.value == sorted_indices[0])
                           and label.value != 3):
                        data = samples[current_loc:current_loc +
                                       self.data_length]
                        data = np.abs(np.fft.rfft(data))[5:result_length + 5]
                        factor = 1 / np.amax(data)
                        data = data * factor
                        data = np.column_stack((ones, data))
                        label = cardioLabels.noisy
                        if row[1] != "~":
                            label = cardioLabels[row[1]]
                        _all_labels.append(label.value)
                        _all_datapoints.append(data)
                        current_data.append(data)
                        counter[label.value] = counter[label.value] + 1
                        current_loc += self.data_length + 1
                        len_left -= self.data_length
                    # oversample: if this class still trails the second most common
                    # one (or is class 0), append each of its windows twice more
                    if counter[label.value] < counter[
                            sorted_indices[2]] or label.value == 0:
                        for data in current_data:
                            _all_datapoints.append(data)
                            _all_labels.append(label.value)
                            _all_datapoints.append(data)
                            _all_labels.append(label.value)
                            counter[label.value] = counter[label.value] + 1
                            counter[label.value] = counter[label.value] + 1
        _all_datapoints = np.array(_all_datapoints)
        _all_labels = convert_to_onehot(np.array(_all_labels))

        _spinner.print_spinner(100.0)

        print(_all_datapoints.shape)
        print(_all_labels.shape)
        print(_all_labels[0])
        self._all_data = (_all_datapoints, _all_labels)
        print("datapoints by class:", counter)
        np.save(self._all_data_path, _all_datapoints)
        np.save(self._all_labels_path, _all_labels)