Пример #1
0
    def deserialize(sess=None):
        """Rebuild a ConvModel from its pickled state on disk.

        Reads the config and dataset-info pickles from CONV_STATE and, when a
        TensorFlow session is supplied, builds the graph and restores saved
        weights from MODEL_FNAME if a checkpoint file exists.

        :param sess: optional tf.Session; when given, variables are restored into it
        :return: the reconstructed ConvModel instance
        """
        # `with` guarantees the handle is closed (the original leaked it);
        # "rb" is the correct mode for reading pickled binary data.
        with open(env.run(ConvModel.CONV_STATE), "rb") as src:
            cfg = pkl.load(src)
            ds_info = pkl.load(src)
        cm = ConvModel(cfg, ds_info)
        if sess:
            cm.build()
            saver = tf.train.Saver()
            modelf = env.run(ConvModel.MODEL_FNAME)
            if os.path.exists(modelf):
                logging.info("Restoring from {}".format(modelf))
                saver.restore(sess, modelf)

        return cm
Пример #2
0
    def deserialize(sess=None):
        """Rebuild a ConvModel from its pickled state on disk.

        :param sess: optional tf.Session; when provided, the graph is built
            and saved variables are restored into it if a checkpoint exists
        :return: the reconstructed ConvModel instance
        """
        # NOTE(review): the handle is never closed and the file is opened in
        # text mode; consider `with open(..., "rb")`.
        src = open(env.run(ConvModel.CONV_STATE))
        cfg = pkl.load(src)      # first pickle record: model configuration
        ds_info = pkl.load(src)  # second pickle record: dataset info
        cm = ConvModel(cfg, ds_info)
        if sess:
            cm.build()
            saver = tf.train.Saver()
            modelf = env.run(ConvModel.MODEL_FNAME)
            # Restore weights only when a checkpoint file actually exists;
            # otherwise the caller gets an unrestored (freshly built) graph.
            if os.path.exists(modelf):
                logging.info("Restoring from {}".format(modelf))
                saver.restore(sess, modelf)

        return cm
Пример #3
0
 def serialize(self, sess):
     """Persist the model to disk.

     Saves the TF checkpoint, pickles cfg/ds_info into CONV_STATE, and dumps
     the recovery filter as a (filter_len, filters_num) numpy array.

     :param sess: active tf.Session holding the variables to save
     """
     model_fname = env.run(ConvModel.MODEL_FNAME)
     logging.info("Saving model in {}".format(model_fname))
     saver = tf.train.Saver()
     saver.save(sess, model_fname)

     # `with` closes the handles (the original leaked them); "wb" is the
     # correct mode for pickle / np.save binary payloads.
     with open(env.run(ConvModel.CONV_STATE), "wb") as dst:
         pkl.dump(self.cfg, dst)
         pkl.dump(self.ds_info, dst)

     recov_filter_fname = env.run(ConvModel.RECOV_FILTER_FNAME)
     with open(recov_filter_fname, "wb") as rf:
         np.save(
             rf,
             sess.run(self.recov_filter).reshape(self.cfg.filter_len, self.cfg.filters_num)
         )
Пример #4
0
    def serialize(self, sess):
        """Persist the model to disk: TF checkpoint, pickled config and
        dataset info, and the recovery filter as a numpy array.

        :param sess: active tf.Session holding the variables to save
        """
        model_fname = env.run(ConvModel.MODEL_FNAME)
        logging.info("Saving model in {}".format(model_fname))
        saver = tf.train.Saver()
        saver.save(sess, model_fname)

        # NOTE(review): both handles below are never closed and are opened in
        # text mode; consider `with open(..., "wb")` for binary payloads.
        dst = open(env.run(ConvModel.CONV_STATE), "w")

        pkl.dump(self.cfg, dst)
        pkl.dump(self.ds_info, dst)
        recov_filter_fname = env.run(ConvModel.RECOV_FILTER_FNAME)
        # The filter variable is flattened to (filter_len, filters_num)
        # before dumping, which fixes the on-disk layout for loaders.
        np.save(
            open(recov_filter_fname, "w"),
            sess.run(self.recov_filter).reshape(self.cfg.filter_len, self.cfg.filters_num),
        )
Пример #5
0
    def restore_hidden(self, hidden_data, recov_filter=None, sess=None):
        """Reconstruct the input signal from hidden-layer activations.

        Runs a transposed convolution of `hidden_data` against the recovery
        filter, batch by batch, l2-normalizing each reconstructed chunk.

        :param hidden_data: 2-D array (time, filters) of hidden activations
            -- assumed from the reshape below; confirm against callers
        :param recov_filter: optional filter array; loaded from
            RECOV_FILTER_FNAME when omitted
        :param sess: optional tf.Session; a fresh one is created when omitted
        :return: 1-D numpy array with the concatenated reconstruction
        """
        if recov_filter is None:
            recov_filter_fname = env.run(ConvModel.RECOV_FILTER_FNAME)
            assert os.path.exists(recov_filter_fname), "Need recovery filter filename {} to recover".format(recov_filter_fname)
            recov_filter = np.load(open(recov_filter_fname))
            # serialize() stores the filter as (filter_len, filters_num);
            # derive the shape from cfg instead of the old hard-coded
            # (150, 100), which only worked for one configuration.
            recov_filter = recov_filter.reshape(self.cfg.filter_len, self.cfg.filters_num)

        sess = sess if sess else tf.Session()

        output_data = []
        for id_start in xrange(0, hidden_data.shape[0], self.cfg.batch_size):
            # Copy the slice: the thresholding below previously wrote through
            # the view and silently clobbered the caller's array.
            data_slice = hidden_data[id_start:min(id_start+self.cfg.batch_size, hidden_data.shape[0])].copy()
            data_slice[np.where(data_slice < 1e-02)] = 0.0  # drop near-zero noise

            # Placeholders are (re)built each iteration because the final
            # slice may be shorter than batch_size.
            hidden_input = tf.placeholder(tf.float32, shape=(1, data_slice.shape[0], 1, data_slice.shape[1]), name="Input")
            recov_filter_input = tf.placeholder(
                tf.float32,
                shape=(recov_filter.shape[0], 1, 1, recov_filter.shape[1]),
                name="RecovFilter"
            )

            output = tf.nn.conv2d_transpose(
                hidden_input,
                recov_filter_input,
                output_shape=(1, self.cfg.strides*data_slice.shape[0]+recov_filter.shape[0]-1, 1, 1),
                strides=[1, self.cfg.strides, 1, 1],
                padding='VALID'
            )
            output = tf.nn.l2_normalize(output, dim=1)
            out_v = sess.run([output], {
                recov_filter_input: recov_filter.reshape(recov_filter.shape[0], 1, 1, recov_filter.shape[1]),
                hidden_input: data_slice.reshape(1, data_slice.shape[0], 1, data_slice.shape[1])
            })
            output_data.append(out_v[0].reshape(out_v[0].shape[1]))
        return np.concatenate(output_data)
Пример #6
0
    def restore_hidden(self, hidden_data, recov_filter=None, sess=None):
        """Reconstruct the input signal from hidden-layer activations via a
        batched transposed convolution with the recovery filter.

        :param hidden_data: 2-D array (time, filters) of activations --
            assumed from the reshape below; confirm against callers
        :param recov_filter: optional filter array; loaded from disk when omitted
        :param sess: optional tf.Session; a fresh one is created when omitted
        :return: 1-D numpy array with the concatenated reconstruction
        """
        if recov_filter is None:
            recov_filter_fname = env.run(ConvModel.RECOV_FILTER_FNAME)
            assert os.path.exists(recov_filter_fname), "Need recovery filter filename {} to recover".format(
                recov_filter_fname
            )
            recov_filter = np.load(open(recov_filter_fname))
            # NOTE(review): hard-coded shape; serialize() writes the filter as
            # (cfg.filter_len, cfg.filters_num) -- should likely use cfg here.
            recov_filter = recov_filter.reshape(150, 100)  # HACK TODO
        sess = sess if sess else tf.Session()

        output_data = []
        for id_start in xrange(0, hidden_data.shape[0], self.cfg.batch_size):
            data_slice = hidden_data[id_start : min(id_start + self.cfg.batch_size, hidden_data.shape[0])]
            # NOTE(review): this writes through a view and mutates the
            # caller's hidden_data in place -- possibly unintended.
            data_slice[np.where(data_slice < 1e-02)] = 0.0

            # Placeholders are rebuilt per iteration because the final slice
            # may be shorter than batch_size.
            hidden_input = tf.placeholder(
                tf.float32, shape=(1, data_slice.shape[0], 1, data_slice.shape[1]), name="Input"
            )
            recov_filter_input = tf.placeholder(
                tf.float32, shape=(recov_filter.shape[0], 1, 1, recov_filter.shape[1]), name="RecovFilter"
            )

            output = tf.nn.conv2d_transpose(
                hidden_input,
                recov_filter_input,
                output_shape=(1, self.cfg.strides * data_slice.shape[0] + recov_filter.shape[0] - 1, 1, 1),
                strides=[1, self.cfg.strides, 1, 1],
                padding="VALID",
            )
            # Normalize each reconstructed chunk along the time axis.
            output = tf.nn.l2_normalize(output, dim=1)
            out_v = sess.run(
                [output],
                {
                    recov_filter_input: recov_filter.reshape(recov_filter.shape[0], 1, 1, recov_filter.shape[1]),
                    hidden_input: data_slice.reshape(1, data_slice.shape[0], 1, data_slice.shape[1]),
                },
            )
            output_data.append(out_v[0].reshape(out_v[0].shape[1]))
        return np.concatenate(output_data)
Пример #7
0
# if not os.path.exists(df):
#     song_data_raw, source_sr = lr.load(fname)
#     print "Got sampling rate {}, resampling to {} ...".format(source_sr, c.target_sr)
#     song_data = lr.resample(song_data_raw, source_sr, c.target_sr, scale=True)
#     song_data = song_data[:data_size,]

#     np.save(open(df, "w"), song_data)
# else:
#     song_data = np.load(open(df))

# data, data_denom = norm(song_data)

# Create the session and initialize all graph variables.
sess = tf.Session()
sess.run(tf.initialize_all_variables())

# Clear old plot images from the run directory. A plain loop, not a
# throwaway list comprehension, since this is done purely for side effects.
for f in glob("{}/*.png".format(env.run())):
    os.remove(f)


def run(state_v, inputs_v, targets_v):
    """Run one training step, returning all monitored tensors."""
    fetches = [
        output_norm, finstate, cost, net_out, conv_out, filter,
        recov_filter, apply_grads, target_norm, grads
    ]
    feeds = {
        input: inputs_v.reshape(1, c.seq_size, 1, 1),
        target: targets_v.reshape(1, c.seq_size, 1, 1),
        state: state_v,
    }
    return sess.run(fetches, feeds)


def eval(state_v, inputs_v):
Пример #8
0
#     song_data_raw, source_sr = lr.load(fname)
#     print "Got sampling rate {}, resampling to {} ...".format(source_sr, c.target_sr)
#     song_data = lr.resample(song_data_raw, source_sr, c.target_sr, scale=True)
#     song_data = song_data[:data_size,]

#     np.save(open(df, "w"), song_data)
# else:
#     song_data = np.load(open(df))

# data, data_denom = norm(song_data)


# Create the session and initialize all graph variables.
sess = tf.Session()
sess.run(tf.initialize_all_variables())

# Clear old plot images from the run directory. A plain loop, not a
# throwaway list comprehension, since this is done purely for side effects.
for f in glob("{}/*.png".format(env.run())):
    os.remove(f)


def run(state_v, inputs_v, targets_v):
    return sess.run(
        [
            output_norm,
            finstate, 
            cost, 
            net_out,
            conv_out,
            filter, 
            recov_filter,
            apply_grads,
            target_norm,
            grads
Пример #9
0

# Learning rate lives in a variable so it can be adjusted during training.
lrate_var = tf.Variable(0.0, trainable=False)

# Gradients are clipped by global norm (5.0) to stabilize training.
tvars = tf.trainable_variables()
grads_raw = tf.gradients(loss, tvars)
grads, _ = tf.clip_by_global_norm(grads_raw, 5.0)

optimizer = tf.train.AdamOptimizer(lrate_var)

train_step = optimizer.apply_gradients(zip(grads, tvars))

sess = tf.Session()
saver = tf.train.Saver()

# Resume from an existing checkpoint when present; otherwise start fresh.
model_fname = env.run("nn_model.ckpt")
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    # presumably zeroed so no further training epochs run after a restore
    # -- TODO confirm against the training loop
    epochs = 0
else:
    sess.run(tf.initialize_all_variables())



def generate():
    def zero_batch():
        return [ np.zeros((1, input_size)) for _ in xrange(bptt_steps) ]

    def start_batch():
        return [ data_corpus[seq_id, :].todense() for seq_id in xrange(bptt_steps) ]
Пример #10
0
        x_means.append(np.mean(Rn_v, 0))

    x_mean = np.sum(x_means, 0) / len(x_means)

    it = 0
    x_vars = []
    while it + window_size < data.shape[0]:
        Rn_v, it = form_batch(it, data, batch_size, window_size)

        x_vars.append(np.mean(np.square(Rn_v - x_mean), 0))

    x_var = np.sum(x_vars, 0) / len(x_vars)
    return x_mean, x_var


df = env.run("test_data.pkl")
fname = env.dataset(
    [f for f in os.listdir(env.dataset()) if f.endswith(".wav")][0])

if not os.path.exists(df):
    song_data_raw, source_sr = lr.load(fname)
    print "Got sampling rate {}, resampling to {} ...".format(
        source_sr, target_sr)
    data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    # data = song_data[:30000,]

    np.save(open(df, "w"), data)
else:
    data = np.load(open(df))

x_mean, x_var = calc_mean_and_var(data, batch_size, filter_size)
Пример #11
0
    outputs_v, finstate_v = sess.run(
        [
            outputs,
            finstate,
        ], 
        feed_dict
    )
    return np.concatenate(outputs_v), finstate_v




sess = tf.Session()
saver = tf.train.Saver()

model_fname = env.run("test_model.ckpt")
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    c.epochs = 0
else:
    sess.run(tf.initialize_all_variables())


[ os.remove(f) for f in glob("{}/*.png".format(env.run())) ]


data_len = 2*c.seq_size
seed = 1
sfn=9
sf_sigma = 0.1
Пример #12
0
	x_mean = np.sum(x_means, 0)/len(x_means)

	it = 0
	x_vars = []
	while it + window_size < data.shape[0]:
		Rn_v, it = form_batch(it, data, batch_size, window_size)

		x_vars.append(np.mean(np.square(Rn_v - x_mean), 0))

	x_var = np.sum(x_vars, 0)/len(x_vars)
	return x_mean, x_var




df = env.run("test_data.pkl")
fname = env.dataset([f for f in os.listdir(env.dataset()) if f.endswith(".wav")][0])

if not os.path.exists(df):
    song_data_raw, source_sr = lr.load(fname)
    print "Got sampling rate {}, resampling to {} ...".format(source_sr, target_sr)
    data = lr.resample(song_data_raw, source_sr, target_sr, scale=True)
    # data = song_data[:30000,]

    np.save(open(df, "w"), data)
else:
    data = np.load(open(df))


x_mean, x_var = calc_mean_and_var(data, batch_size, filter_size)