Example #1
    def __init__(self, num_time_samples, num_channels, gpu_fraction):
        inputs = tf.placeholder(tf.float32,
                                shape=(None, num_time_samples, num_channels))
        targets = tf.placeholder(tf.int32, shape=(None, num_time_samples))

        h = inputs
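        # two stacked blocks of dilated convolutions; the dilation rate
        # doubles with each layer (1, 2, 4, ..., 8192)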
        for b in range(2):
            for i in range(14):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, 128, rate=rate, name=name)

        outputs = conv1d(h,
                         256,
                         filter_width=1,
                         gain=1.0,
                         activation=None,
                         bias=True)

        cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=outputs,
                                                           labels=targets))

        train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated

        self.inputs = inputs
        self.targets = targets
        self.outputs = outputs
        self.cost = cost
        self.train_step = train_step
        self.sess = sess
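
A minimal driver for this constructor might look as follows. This is only a sketch: the enclosing class name `Model` is an assumption (the snippet shows just an `__init__`), and the data is random noise purely for shape-checking.

# Hypothetical usage sketch for Example #1; `Model` is an assumed class name.
import numpy as np

model = Model(num_time_samples=1024, num_channels=1, gpu_fraction=0.5)

x = np.random.randn(4, 1024, 1).astype(np.float32)              # (batch, time, channels)
y = np.random.randint(0, 256, size=(4, 1024)).astype(np.int32)  # per-timestep class ids

_, loss = model.sess.run([model.train_step, model.cost],
                         feed_dict={model.inputs: x, model.targets: y})
print('training loss:', loss)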
Example #2
    def __init__(self,
                 num_time_samples,
                 num_channels=1,
                 num_classes=256,
                 num_blocks=2,
                 num_layers=14,
                 num_hidden=128,
                 gpu_fraction=1.0):

        self.num_time_samples = num_time_samples
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.gpu_fraction = gpu_fraction

        inputs = tf.compat.v1.placeholder(
            tf.float32, shape=(None, num_time_samples, num_channels))
        targets = tf.compat.v1.placeholder(
            tf.int32, shape=(None, num_time_samples))

        h = inputs
        hs = []
        for b in range(num_blocks):
            for i in range(num_layers):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
                hs.append(h)

        outputs = conv1d(h,
                         num_classes,
                         filter_width=1,
                         gain=1.0,
                         activation=None,
                         bias=True)

        costs = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=outputs, labels=targets)
        cost = tf.compat.v1.reduce_mean(costs)

        train_step = tf.compat.v1.train.AdamOptimizer(
            learning_rate=0.001).minimize(cost)

        # gpu_options = tf.compat.v1.GPUOptions(
        #     per_process_gpu_memory_fraction=gpu_fraction)
        # sess = tf.compat.v1.Session(
        #     config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
        sess = tf.compat.v1.Session()
        sess.run(tf.compat.v1.global_variables_initializer())

        self.inputs = inputs
        self.targets = targets
        self.outputs = outputs
        self.hs = hs
        self.costs = costs
        self.cost = cost
        self.train_step = train_step
        self.sess = sess
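
The num_blocks and num_layers defaults determine how far back in time the network can see. A quick way to sanity-check that, assuming each dilated_conv1d uses a filter width of 2 (the usual WaveNet convention; the helper's actual width is not shown in this snippet):

# Receptive field of the stacked dilated convolutions above, assuming a
# filter width of 2 per layer (an assumption; dilated_conv1d is not shown).
def receptive_field(num_blocks, num_layers, filter_width=2):
    per_block = sum((filter_width - 1) * 2**i for i in range(num_layers))
    return num_blocks * per_block + 1

print(receptive_field(num_blocks=2, num_layers=14))  # -> 32767 samples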
Example #3
    def create_model(self,
                     model_input,
                     vocab_size,
                     l2_penalty=1e-5,
                     is_training=True,
                     dense_labels=None,
                     **unused_params):

        num_blocks = 3
        num_layers = 3
        num_hidden = 512

        hs = []
        h = model_input
        for b in range(num_blocks):
            for i in range(num_layers):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
                hs.append(h)

        h = tf.reduce_max(h, axis=1)
        logits = slim.fully_connected(
            h,
            vocab_size,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty))
        # outputs = conv1d(h,
        #                  num_classes,
        #                  filter_width=1,
        #                  gain=1.0,
        #                  activation=None,
        #                  bias=True)

        labels = tf.cast(dense_labels, tf.float32)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                       logits=logits)
        loss = tf.reduce_mean(tf.reduce_sum(loss, 1))
        preds = tf.nn.sigmoid(logits)

        return {"predictions": preds, "loss": loss}
Example #4
    def __init__(
        self,
        # num_time_samples,
        num_channels=1,
        num_classes=256,
        num_blocks=2,
        num_layers=14,
        num_hidden=128,
        gpu_fraction=1.0,
        prob_model_type='softmax',
    ):

        # self.num_time_samples = num_time_samples
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.gpu_fraction = gpu_fraction
        self.prob_model_type = prob_model_type

        inputs = tf.placeholder(tf.float32, shape=(None, None, num_channels))
        # targets = tf.placeholder(tf.int32, shape=(None, num_time_samples))

        h = inputs
        hs = []
        for b in range(num_blocks):
            for i in range(num_layers):
                rate = 2**i
                name = 'b{}-l{}'.format(b, i)
                h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
                hs.append(h)

        outputs = conv1d(h,
                         num_classes,
                         filter_width=1,
                         gain=1.0,
                         activation=None,
                         bias=True)
        raw_outputs_shape = tf.shape(outputs)  # (batch, time, classes) before flattening; unused below
        outputs = tf.reshape(outputs, [-1, num_classes])
        if prob_model_type == 'softmax':
            self.prob_model = MultinomialLayer(outputs,
                                               num_classes,
                                               num_classes,
                                               one_hot=False)
        elif prob_model_type == 'sdp':
            self.prob_model = LocallySmoothedMultiscaleLayer(outputs,
                                                             num_classes,
                                                             num_classes,
                                                             one_hot=False,
                                                             k=1,
                                                             lam=0)
        else:
            raise ValueError(
                'Unknown prob_model_type: {}'.format(prob_model_type))

        train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
            self.prob_model.train_loss)

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        sess.run(tf.global_variables_initializer())

        self.inputs = inputs
        # self.targets = targets
        self.outputs = outputs
        self.hs = hs
        # self.costs = costs
        self.cost = self.prob_model.test_loss
        self.train_step = train_step
        self.sess = sess
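
Note that, unlike the earlier variants, the input placeholder here is shaped (None, None, num_channels), so one graph serves sequences of any length. A hypothetical shape-check (again assuming the enclosing class is called `Model`):

# Hypothetical driver for Example #4; `Model` is an assumed class name.
import numpy as np

model = Model(num_channels=1, prob_model_type='softmax')

x = np.random.randn(2, 4096, 1).astype(np.float32)   # any time length works here
logits = model.sess.run(model.outputs, feed_dict={model.inputs: x})
print(logits.shape)  # (2 * 4096, 256): outputs were flattened to (-1, num_classes)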