def _compile_model(self, data):
    """Compiles the model in TensorFlow."""
    # The incoming `data` argument is replaced by a fresh placeholder:
    # one-hot bases (4 rows) by sequence position, single channel.
    data = tf.placeholder(tf.float32, [None, 4, self.seq_length, 1])
    keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
    conv_layer1 = _get_conv_layer(data, 'first_conv', nb_rows=4,
                                  nb_cols=self.conv_width[0],
                                  nb_filters=self.num_filters[0],
                                  keep_prob=keep_prob)
    conv_layer2 = _get_conv_layer(conv_layer1, 'second_conv', nb_rows=1,
                                  nb_cols=self.conv_width[1],
                                  nb_filters=self.num_filters[1],
                                  keep_prob=keep_prob)
    conv_layer3 = _get_conv_layer(conv_layer2, 'third_conv', nb_rows=1,
                                  nb_cols=self.conv_width[2],
                                  nb_filters=self.num_filters[2],
                                  keep_prob=keep_prob)
    max_pool = _get_max_pool_layer(conv_layer3, 'max_pool', nb_rows=1,
                                   nb_cols=25, strides=[1, 2, 2, 1])

    with tf.variable_scope('fc_layer') as scope:
        # Flatten to [batch, features]; index 1 is the per-example feature count.
        flattened = tf.layers.flatten(max_pool)
        dim = flattened.get_shape()[1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, NUM_CLASSES],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_gpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))
        self.compiled_model = tf.nn.sigmoid(tf.matmul(flattened, weights) + biases,
                                            name=scope.name)
def build_model(data):
    """
    Build a CNN using TF
    """

    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    num_filters_1 = 45
    num_filters_2 = 50
    num_filters_3 = 50

    with tf.variable_scope('first_conv') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[4, 8, 1, num_filters_1],
                                             stddev=5e-2,
                                             wd=0.0)
        conv = tf.nn.conv2d(data, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_gpu('biases', [num_filters_1], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias)
        conv1_drop = tf.nn.dropout(conv1, keep_prob, name=scope.name)

    with tf.variable_scope('second_conv') as scope:
        # Takes conv1_drop (not the raw input), so the kernel's input-channel
        # dimension must match num_filters_1.
        kernel = _variable_with_weight_decay('weights',
                                             shape=[1, 8, num_filters_1, num_filters_2],
                                             stddev=5e-2,
                                             wd=0.0)
        conv = tf.nn.conv2d(conv1_drop, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_gpu('biases', [num_filters_2], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias)
        conv2_drop = tf.nn.dropout(conv2, keep_prob, name=scope.name)

    with tf.variable_scope('third_conv') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[1, 8, num_filters_2, num_filters_3],
                                             stddev=5e-2,
                                             wd=0.0)
        conv = tf.nn.conv2d(conv2_drop, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_gpu('biases', [num_filters_3], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias)
        conv3_drop = tf.nn.dropout(conv3, keep_prob, name=scope.name)

    with tf.variable_scope('max_pool') as scope:
        # Pool along the sequence (width) dimension, matching the
        # nb_rows=1, nb_cols=25 pooling used in _compile_model above.
        maxpool = tf.nn.max_pool(conv3_drop, ksize=[1, 1, 25, 1],
                                 strides=[1, 2, 2, 1], padding='SAME',
                                 name=scope.name)

    with tf.variable_scope('fc_layer') as scope:
        flattened = tf.layers.flatten(maxpool)
        dim = flattened.get_shape()[1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, NUM_CLASSES],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_gpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))
        sigmoid_result = tf.nn.sigmoid(tf.matmul(flattened, weights) + biases,
                                       name=scope.name)

    return sigmoid_result
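For context, a minimal driver sketch for build_model (assumptions: NUM_CLASSES and the _variable_* helpers are defined in this module, and a sequence length of 1000) that runs a single forward pass:

import numpy as np
import tensorflow as tf

data = tf.placeholder(tf.float32, [None, 4, 1000, 1])  # 1000 = assumed seq length
probs = build_model(data)
# build_model creates its own dropout placeholder; fetch it by name to feed it.
keep_prob = tf.get_default_graph().get_tensor_by_name('keep_prob:0')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(8, 4, 1000, 1).astype(np.float32)
    out = sess.run(probs, feed_dict={data: batch, keep_prob: 1.0})
    print(out.shape)  # (8, NUM_CLASSES)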
Example #4
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, x, reuse=True):
		with tf.variable_scope(self.name) as vs:
			if reuse:
				vs.reuse_variables()
			d = tcl.fully_connected(tcl.flatten(x), 64, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			d = tcl.fully_connected(d, 64, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			d = tcl.fully_connected(d, 64, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			logit = tcl.fully_connected(d, 1, activation_fn=None)

		return logit
Example #5
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, x, reuse=True):
		with tf.variable_scope(self.name) as vs:
			if reuse:
				vs.reuse_variables()
			size = 64
			x = tcl.fully_connected(tcl.flatten(x), size, activation_fn=tf.nn.relu)
			x = tcl.fully_connected(x, size, activation_fn=tf.nn.relu)
			x = tcl.fully_connected(x, size, activation_fn=tf.nn.relu)
			logit = tcl.fully_connected(x, 1, activation_fn=None)

		return logit
Example #6
    def __call__(self, x, reuse=True):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()
            size = 64
            x = tcl.fully_connected(tcl.flatten(x), size, activation_fn=tf.nn.relu)
            x = tcl.fully_connected(x, size, activation_fn=tf.nn.relu)
            x = tcl.fully_connected(x, size, activation_fn=tf.nn.relu)
            logit = tcl.fully_connected(x, 1, activation_fn=None)

        return logit
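The __call__ bodies in Examples #4 to #6 belong to discriminator classes from the linked GAN project; a minimal self-contained wrapper (a hypothetical class, for illustration only) shows how such a __call__ is typically used with variable reuse:

import tensorflow as tf
import tensorflow.contrib.layers as tcl

class Discriminator:
    """Hypothetical stand-in for the classes these methods come from."""
    def __init__(self, name='discriminator'):
        self.name = name

    def __call__(self, x, reuse=True):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()
            d = tcl.fully_connected(tcl.flatten(x), 64, activation_fn=tf.nn.relu)
            return tcl.fully_connected(d, 1, activation_fn=None)

# The first call must create the variables, so pass reuse=False there;
# every later call shares the same weights.
x_real = tf.placeholder(tf.float32, [None, 28, 28, 1])
d = Discriminator()
logit_real = d(x_real, reuse=False)
logit_fake = d(x_real)  # reuse=True by default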
Example #7
    def intrinsic(self, x, mass_1, weight_1, ind_1, mv_1, ds_1, l_1, mass_2,
                  weight_2, ind_2, mv_2, ds_2, l_2, mass_3, weight_3, ind_3,
                  mv_3, ds_3, l_3):

        f1 = self.surface_conv(x, mass_1, weight_1, ind_1, mv_1, ds_1, l_1,
                               self.ker1, self.bias1, self.n_channel,
                               self.n_ker[0])
        f2 = self.surface_conv(f1, mass_2, weight_2, ind_2, mv_2, ds_2, l_2,
                               self.ker2, self.bias2, self.n_channel,
                               self.n_ker[1])
        f3 = self.surface_conv(f2, mass_3, weight_3, ind_3, mv_3, ds_3, l_3,
                               self.ker3, self.bias3, self.n_channel,
                               self.n_ker[2])
        # The fourth convolution reuses the level-3 geometry (mass_3, ind_3, ...).
        f4 = self.surface_conv(f3, mass_3, weight_3, ind_3, mv_3, ds_3, l_3,
                               self.ker4, self.bias4, self.n_channel,
                               self.n_ker[4])
        # Flatten to [batch, features] and return the flattened tensor.
        f5 = tf.layers.flatten(f4)
        return f5
Example #8
def run_cnn():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    learning_rate = 0.0001
    epochs = 10
    batch_size = 50

    x = tf.placeholder(tf.float32, [None, 784])
    x_shaped = tf.reshape(x, [-1, 28, 28, 1])

    y = tf.placeholder(tf.float32, [None, 10])

    layer1 = create_conv_layer(x_shaped, 1, 32, [5, 5], [2, 2], "layer1")
    layer2 = create_conv_layer(layer1, 32, 64, [5, 5], [2, 2], "layer2")

    # After two 2x2 max-pools, 28x28 -> 7x7 with 64 channels: 7*7*64 = 3136.
    flattened = tf.reshape(layer2, [-1, 3136])

    wd1 = tf.Variable(tf.truncated_normal([3136, 1000], stddev=0.03),
                      name="wd1")
    bd1 = tf.Variable(tf.truncated_normal([1000], stddev=0.03), name="bd1")

    out_layer1 = tf.matmul(flattened, wd1) + bd1
    out_layer1 = tf.nn.relu(out_layer1)

    wd2 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.03), name="wd2")
    bd2 = tf.Variable(tf.truncated_normal([10], stddev=0.03), name="bd2")

    out_layer2 = tf.matmul(out_layer1, wd2) + bd2
    y_ = tf.nn.softmax(out_layer2)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=out_layer2, labels=y))

    optimiser = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    init_op = tf.global_variables_initializer()
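run_cnn stops after graph construction; a minimal continuation of the function body (a sketch, assuming the standard TF1 MNIST feed used above) would train and evaluate the network:

    # Hypothetical continuation inside run_cnn: a standard TF1 training loop.
    with tf.Session() as sess:
        sess.run(init_op)
        total_batches = mnist.train.num_examples // batch_size
        for epoch in range(epochs):
            avg_cost = 0.0
            for _ in range(total_batches):
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimiser, cross_entropy],
                                feed_dict={x: batch_x, y: batch_y})
                avg_cost += c / total_batches
            print("Epoch:", epoch + 1, "cost =", "{:.3f}".format(avg_cost))
        print("Test accuracy:",
              sess.run(accuracy, feed_dict={x: mnist.test.images,
                                            y: mnist.test.labels}))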
Example #9
def dice_coef(y_true, y_pred):
    smooth = 1.
    # Flatten both tensors to 1-D so the element-wise overlap can be summed.
    y_true_f = tf.reshape(y_true, [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
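A quick sanity check of dice_coef on toy tensors (illustrative TF1 session code):

import tensorflow as tf

# Identical masks give a dice score of exactly 1.0:
# (2*2 + 1) / (2 + 2 + 1) = 1.0 with smooth = 1.
y_true = tf.constant([[0., 1., 1., 0.]])
y_pred = tf.constant([[0., 1., 1., 0.]])
score = dice_coef(y_true, y_pred)

with tf.Session() as sess:
    print(sess.run(score))  # 1.0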
Example #10
    def __init__(self,
                 policy,
                 ob_space,
                 ac_space,
                 max_grad,
                 encoef=0.01,
                 vcoef=0.5,
                 klcoef=0.1,
                 aggregator='concat',
                 traj_len=8,
                 nh=64):
        ob_shape = ob_space.shape
        nfeat = np.prod(ob_shape)
        nactions = ac_space.n
        self.OBS = tf.placeholder(dtype=tf.float32, shape=(None, ) + ob_shape)
        self.NEXT_OBS = tf.placeholder(dtype=tf.float32,
                                       shape=(None, ) + ob_shape)
        self.R = tf.placeholder(dtype=tf.float32, shape=(None, ))
        self.ADV = tf.placeholder(dtype=tf.float32, shape=(None, ))
        self.OLDACTIONS = tf.placeholder(dtype=tf.int32, shape=(None, ))
        self.OLDVALUES = tf.placeholder(dtype=tf.float32, shape=(None, ))
        self.OLDNLPS = tf.placeholder(dtype=tf.float32, shape=(None, ))
        self.CLIPRANGE = tf.placeholder(dtype=tf.float32)
        self.LR = tf.placeholder(dtype=tf.float32)  # learning rate

        flat_obs = tf.layers.flatten(self.OBS)
        flat_next = tf.layers.flatten(self.NEXT_OBS)

        pdtype = make_pdtype(ac_space)
        Aggregator = {'concat': tf.layers.flatten}[aggregator]

        self.sess = tf.get_default_session()

        encoder_params = [nh, tf.contrib.rnn.BasicLSTMCell, None, 'Encoder']
        model_params = [
            traj_len, 'Env_Model', self.LR, nh, nfeat, vcoef, tf.nn.tanh,
            max_grad
        ]
        mb_policy_params = ['Model_Based', nh, tf.nn.tanh]
        mf_policy_params = ['Model_Free', nh, tf.nn.tanh]

        trajectories = []
        # Imagination Core
        # trajectory encoder
        self.model_free_policy = MFPolicy(flat_obs, pdtype, *mf_policy_params)
        model_free_pd = self.model_free_policy.pd(flat_obs)[0]
        model_free_output = model_free_pd.logits

        # model free policy for imagined rollouts
        self.environment_model = EnvironmentModel(flat_obs, nactions,
                                                  self.OLDACTIONS, flat_next,
                                                  self.R,
                                                  self.model_free_policy,
                                                  *model_params)
        # trajectory rollout model
        # Swap the first two axes: [n_trajectories, traj_len, ...] ->
        # [traj_len, n_trajectories, ...]; tf.reshape cannot permute axes.
        trajectories = tf.transpose(
            tf.stack(self.environment_model.trajectories), [1, 0, 2, 3])

        self.encoder = Encoder(trajectories, *encoder_params)
        encoded_traj = self.encoder.encoded_traj
        plan = Aggregator(encoded_traj)
        # encoding
        self.model_based_policy = MBPolicy(flat_obs, model_free_output, plan,
                                           pdtype, *mb_policy_params)
        # model based policy
        self.act = self.model_based_policy.act
        nlp = self.model_based_policy.pd.neglogp(self.OLDACTIONS)
        val = self.model_based_policy.vf

        # training terms
        adv = self.ADV
        ratio = tf.exp(self.OLDNLPS - nlp)
        pl1 = -adv * ratio
        pl2 = -adv * tf.clip_by_value(ratio, 1.0 - self.CLIPRANGE,
                                      1.0 + self.CLIPRANGE)
        ploss = tf.reduce_mean(tf.maximum(pl1, pl2))

        vclip = self.OLDVALUES + tf.clip_by_value(
            val - self.OLDVALUES, -self.CLIPRANGE, self.CLIPRANGE)
        vf_losses1 = tf.square(val - self.R)
        vf_losses2 = tf.square(vclip - self.R)
        vloss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
        self.approxkl = .5 * tf.reduce_mean(tf.square(nlp - self.OLDNLPS))
        self.clipfrac = tf.reduce_mean(
            tf.to_float(tf.greater(tf.abs(ratio - 1.0), self.CLIPRANGE)))

        cross_entropy = model_free_pd.kl(self.model_based_policy.pd)
        label_entropy = self.model_based_policy.pd.entropy()
        logit_entropy = model_free_pd.entropy()
        entropy = klcoef * cross_entropy - encoef * (label_entropy +
                                                     logit_entropy)

        self.rl_loss = ploss + vloss + entropy
        optimizer = tf.train.AdamOptimizer(self.LR)
        params = tf.trainable_variables()
        grads = tf.gradients(self.rl_loss, params)
        if max_grad is not None:
            grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad)
        grads = list(zip(grads, params))
        #self.rl_trainer = optimizer.apply_gradients(grads)
        self.rl_trainer = tf.no_op()

        self.environment_trainer = self.environment_model.trainer
        self.curiosity = self.environment_model.curiosity

        self.ploss = ploss
        self.vloss = vloss
        self.entropy = entropy
        self.nlp = nlp
        self.values = val

        self.loss_names = ['approxkl', 'clipfrac', 'ploss', 'vloss', 'entropy']
        tf.global_variables_initializer().run(session=self.sess)
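The policy loss above is the PPO-style clipped surrogate objective; a standalone NumPy sketch with illustrative numbers shows what the clipping does:

import numpy as np

# Illustrative values only, not taken from the model above.
adv = np.array([1.0, -1.0])    # advantages
ratio = np.array([1.5, 0.5])   # new/old policy probability ratios
cliprange = 0.2

pl1 = -adv * ratio
pl2 = -adv * np.clip(ratio, 1.0 - cliprange, 1.0 + cliprange)
# The element-wise max keeps the more pessimistic (clipped) term.
ploss = np.mean(np.maximum(pl1, pl2))
print(ploss)  # -0.2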
Example #11
def dice_coefficient(y1, y2, smoothness=1.):
    # smoothness: smoothing constant that avoids division by zero.
    y1 = tf.reshape(y1, [-1])
    y2 = tf.reshape(y2, [-1])
    return (2. * tf.reduce_sum(y1 * y2) + smoothness) / (
        tf.reduce_sum(y1) + tf.reduce_sum(y2) + smoothness)
Example #12
predictions = model.predict(x[:20])

print("predictions:", predictions)
print("results:", y[:20])

test_stream = read_g729_norm('dtmf_2021_H_02_H_18.vol1.0.18')
i = 0
while i < len(test_stream):
    # Take a 10-sample window; renamed from `tf` to avoid shadowing the
    # tensorflow module.
    frame = test_stream[i:i + 10]
    if len(frame) < 10:
        break

    #res = []
    #for row in np.transpose(frame):
    #    res.append(scipy.fft.dct(row))
    #frame = np.array(res).flatten()
    frame = frame.flatten()

    predictions = model.predict(np.array([
        frame,
    ]))
    didx = np.argmax(predictions)
    dtmf = g729_model_ovec[didx]
    if dtmf is not None:
        print(i, dtmf, predictions[0][didx])
        i += 10
    else:
        i += 1
        if i % 10 == 0:
            print('_')