Example #1
import tensorflow as tf  # TF 1.x graph-mode API


# dilated_conv1d and conv1d are helper ops defined elsewhere in the
# repository; the num_* settings are module-level globals.
def create_network():
    # Placeholders for X and Y data
    inputs = tf.placeholder(tf.float32,
                            shape=(None, num_time_samples, num_channels))
    targets = tf.placeholder(tf.int32, shape=(None, num_time_samples))

    # Dilated convolutions are applied repeatedly; the rate doubles each layer.
    h = inputs
    hs = []
    for b in range(num_blocks):
        for i in range(num_layers):
            rate = 2**i
            name = 'b{}-l{}'.format(b, i)
            h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
            hs.append(h)

    outputs = conv1d(h,
                     num_classes,
                     filter_width=1,
                     gain=1.0,
                     activation=None,
                     bias=True)

    costs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=outputs,
                                                           labels=targets)
    cost = tf.reduce_mean(costs)

    train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    return sess, cost, train_step, inputs, targets
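A minimal training-loop sketch for the function above (hypothetical; x_batch and y_batch stand for numpy arrays shaped to the two placeholders):

# Hypothetical usage (TF 1.x). x_batch: float32, (batch, num_time_samples,
# num_channels); y_batch: int32 class ids, (batch, num_time_samples).
sess, cost, train_step, inputs, targets = create_network()
for step in range(1000):
    loss, _ = sess.run([cost, train_step],
                       feed_dict={inputs: x_batch, targets: y_batch})
    if step % 100 == 0:
        print('step {}: loss {:.4f}'.format(step, loss))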
Example #2
    def __init__(self, num_time_samples, num_channels, gpu_fraction):
        inputs = tf.placeholder(tf.float32,
                                shape=(None, num_time_samples, num_channels))
        targets = tf.placeholder(tf.int32, shape=(None, num_time_samples))

        h = inputs
        for b in range(2):
            for i in range(14):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, 128, rate=rate, name=name)

        outputs = conv1d(h,
                         256,
                         filter_width=1,
                         gain=1.0,
                         activation=None,
                         bias=True)

        cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=outputs,
                                                           labels=targets))

        train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        sess.run(tf.global_variables_initializer())

        self.inputs = inputs
        self.targets = targets
        self.outputs = outputs
        self.cost = cost
        self.train_step = train_step
        self.sess = sess
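The per_process_gpu_memory_fraction option above caps how much GPU memory the session may claim. A hypothetical instantiation (the enclosing class name is not shown; Model is assumed):

# Hypothetical; `Model` stands in for the class that owns this __init__.
# gpu_fraction=0.5 lets the session claim at most half the GPU's memory.
net = Model(num_time_samples=10000, num_channels=1, gpu_fraction=0.5)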
Example #3
    def __init__(self,
                 num_time_samples,
                 num_channels=1,
                 num_classes=256,
                 num_blocks=2,
                 num_layers=14,
                 num_hidden=128,
                 gpu_fraction=1.0):

        self.num_time_samples = num_time_samples
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.gpu_fraction = gpu_fraction

        inputs = tf.placeholder(tf.float32,
                                shape=(None, num_time_samples, num_channels))
        targets = tf.placeholder(tf.int32, shape=(None, num_time_samples))

        h = inputs
        hs = []
        for b in range(num_blocks):
            for i in range(num_layers):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
                hs.append(h)

        outputs = conv1d(h,
                         num_classes,
                         filter_width=1,
                         gain=1.0,
                         activation=None,
                         bias=True)

        costs = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                               logits=outputs)
        cost = tf.reduce_mean(costs)

        train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        sess.run(tf.global_variables_initializer())

        self.inputs = inputs
        self.targets = targets
        self.outputs = outputs
        self.hs = hs
        self.costs = costs
        self.cost = cost
        self.train_step = train_step
        self.sess = sess
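Example #3 expects integer class targets in [0, num_classes). With the default num_classes=256, a signal in [-1, 1] can be quantized the same way Example #5 does below; a minimal sketch:

import numpy as np

# Map a float signal in [-1, 1] to 256 integer class labels, mirroring the
# np.digitize binning used in Example #5.
bins = np.linspace(-1, 1, 256)
x = np.random.uniform(-1, 1, size=10000)          # hypothetical waveform
targets = np.digitize(x, bins, right=False) - 1   # ints in [0, 255]
inputs = bins[targets][None, :, None]             # floats, shape (1, T, 1)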
Example #4
    def __init__(self,
                 num_time_samples=10000,
                 num_channels=6,
                 num_classes=512,
                 num_blocks=2,
                 num_layers=14,
                 num_hidden=128,
                 batch_size=2,
                 gpu_num="0",
                 model_name="default_name"):

        self.num_time_samples = num_time_samples
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.gpu_num = gpu_num
        self.model_name = model_name
        self.batch_size = batch_size

        self.iter_save_fig = 5000
        self.iter_save_param = 10000
        self.iter_calc_loss = 2500
        self.iter_end_training = 500000

        inputs = tf.placeholder(tf.float32,
                                shape=(None, num_time_samples, num_channels))
        targets = tf.placeholder(tf.int32,
                                 shape=(None, num_time_samples, num_channels))

        h = inputs
        hs = []
        for b in range(num_blocks):
            for i in range(num_layers):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
                hs.append(h)

        outputs = conv1d(h,
                         num_classes * num_channels,
                         filter_width=1,
                         gain=1.0,
                         activation=None,
                         bias=True)

        print(outputs.shape)
        print(targets.shape)
        # Per-channel loss: the last axis of the logits packs num_channels
        # contiguous blocks of num_classes values, one block per channel.
        costs = 0
        for i in range(num_channels):
            costs += tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=outputs[:, :, i * num_classes:(i + 1) * num_classes],
                labels=targets[:, :, i])
        cost = tf.reduce_mean(costs)

        train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

        self.config = tf.ConfigProto(gpu_options=tf.GPUOptions(
            visible_device_list=self.gpu_num))
        sess = tf.Session(config=self.config)
        self.saver = tf.train.Saver(max_to_keep=3)

        ckpt = tf.train.get_checkpoint_state("./ckpt_" + self.model_name + "/")
        if ckpt:
            last_model = ckpt.model_checkpoint_path
            self.saver.restore(sess, last_model)
            print("load: " + last_model)
        else:
            print("init")
            sess.run(tf.global_variables_initializer())
        self.inputs_ph = inputs
        self.targets_ph = targets
        self.outputs = outputs
        self.hs = hs
        self.costs = costs
        self.cost = cost
        self.train_step = train_step
        self.sess = sess

        # dataset
        self.bc_train = BitCoinDataset(self.num_time_samples)
        self.param_min = np.load("dataset/param_min.npy")
        self.param_max = np.load("dataset/param_max.npy")

        self.generate_init()
        self.count = 0
        """
Example #5
    def __init__(self,
                 num_time_samples=10000,
                 num_channels=1,
                 num_classes=256,
                 num_blocks=2,
                 num_layers=14,
                 num_hidden=128,
                 batch_size=2,
                 gpu_num="0",
                 model_name="default_name"):

        self.num_time_samples = num_time_samples
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.gpu_num = gpu_num
        self.model_name = model_name
        self.batch_size = batch_size

        self.iter_save_fig = 2500
        self.iter_save_param = 5000
        self.iter_calc_loss = 2500
        self.iter_end_training = 5000000

        inputs = tf.placeholder(tf.float32,
                                shape=(None, num_time_samples, num_channels))
        targets = tf.placeholder(tf.int32, shape=(None, num_time_samples))

        h = inputs
        hs = []
        for b in range(num_blocks):
            for i in range(num_layers):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
                hs.append(h)

        outputs = conv1d(h,
                         num_classes,
                         filter_width=1,
                         gain=1.0,
                         activation=None,
                         bias=True)

        costs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=outputs,
                                                               labels=targets)
        cost = tf.reduce_mean(costs)

        train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

        self.config = tf.ConfigProto(gpu_options=tf.GPUOptions(
            visible_device_list=self.gpu_num))
        sess = tf.Session(config=self.config)
        self.saver = tf.train.Saver(max_to_keep=3)

        ckpt = tf.train.get_checkpoint_state("./ckpt_" + self.model_name + "/")
        if ckpt:
            last_model = ckpt.model_checkpoint_path
            self.saver.restore(sess, last_model)
            print("load: " + last_model)
        else:
            print("init")
            sess.run(tf.global_variables_initializer())
        self.inputs_ph = inputs
        self.targets_ph = targets
        self.outputs = outputs
        self.hs = hs
        self.costs = costs
        self.cost = cost
        self.train_step = train_step
        self.sess = sess

        # dataset
        self.ad_train = AccelerationDataset("acc_dataset_selected/train",
                                            "train10", 0)
        self.ad_test = AccelerationDataset("acc_dataset_selected/test",
                                           "test10", self.num_time_samples)

        self.generate_init()
        self.count = 0

        # Fetch a fixed test batch; the raw features carry 10 channels.
        batch_x, batch_y = self.ad_test.getBatchTrain(False,
                                                      self.num_time_samples,
                                                      self.batch_size)
        batch_x = batch_x.reshape((-1, self.num_time_samples, 10))
        batch_x_0 = batch_x[:, :, 0]
        bins = np.linspace(-1, 1, 256)

        # Quantize inputs.

        inputs_batch, targets_batch = [], []
        for batch in range(batch_x_0.shape[0]):
            x = batch_x_0[batch]
            y = batch_y[batch]
            # Snap the input signal onto the 256 bin values (kept as floats).
            inputs = np.digitize(x, bins, right=False) - 1
            inputs = bins[inputs][None, :, None]
            inputs_batch.append(inputs)

            # Encode targets as ints.
            targets = (np.digitize(y, bins, right=False) - 1)[None, :]
            targets_batch.append(targets)

        inputs_batch = np.vstack(inputs_batch)
        # Re-attach the remaining, unquantized feature channels.
        inputs_batch = np.concatenate((inputs_batch, batch_x[:, :, 1:]),
                                      axis=2)
        targets_batch = np.vstack(targets_batch)

        self.test_inputs = inputs_batch
        self.test_targets = np.array(bins[targets_batch])
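Predictions on the stored test batch can be decoded back to waveform values by taking the argmax class per time step and mapping it through the same bins; a minimal sketch, assuming it is added as a method of the same class:

    def eval_test(self):
        # Sketch: run the network on the stored test batch and decode the
        # predicted class ids back to amplitudes via the same 256 bins.
        bins = np.linspace(-1, 1, 256)
        logits = self.sess.run(self.outputs,
                               feed_dict={self.inputs_ph: self.test_inputs})
        return bins[np.argmax(logits, axis=-1)]  # (batch, num_time_samples)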