for f in sorted(os.listdir(env.dataset())): if f.endswith("sparse_acoustic_data.dump"): print "Considering {} as input".format(f) source_data_file_list.append(env.dataset(f)) data_file_list = source_data_file_list[:] max_t, input_size = 0, None data_denoms = [] data_corpus = None data_ends = [] for source_id, inp_file in enumerate(data_file_list): print "Reading {}".format(inp_file) d = SparseAcoustic.deserialize(inp_file) max_t = max(d.data.shape[0], max_t) assert input_size is None or input_size == d.data.shape[1], "Got spikes with another neurons number" input_size = d.data.shape[1] if data_corpus is None: data_corpus = d.data else: scipy.sparse.vstack([data_corpus, d.data]) data_ends.append(data_corpus.shape[0]) data_denoms.append(d.data_denom) batch_size = len(data_ends)*20 # data_ends = np.asarray([5000, data_ends[0]], dtype=np.int32) def sigmoid(x): return 1.0/(1.0 +np.exp(-x))
# NOTE(review): this chunk begins mid-scope -- sp/mc/e are accumulated by an
# epoch loop that starts before the visible region; indentation below is
# reconstructed from syntax, confirm against the original file.
sp.append(sparsity)
# Report mean cost and mean sparsity for the finished epoch.
print "Epoch {}, cost {}, sparsity {}".format(e, sum(mc)/len(mc), sum(sp)/len(sp))
if epochs > 0:
    # Training ran: re-encode every selected source and export its artifacts.
    for source_id in xrange(len(data_source)):
        # `sel`, if given, whitelists the source ids to process.
        if not sel is None and not source_id in sel:
            continue
        song_data, source_sr, data_denom = read_song(source_id)
        # Forward pass over the song; True presumably toggles training/collect
        # mode in roll_around -- verify against its definition.
        _, sparsity, filter_v, rfilter_v, bias_v, hidden_final, _ = roll_around(song_data, True)
        # hidden_final[np.where(hidden_final < 1e-05)] = 0.0
        # Persist the (still denom-scaled) hidden code for later reuse.
        sa = SparseAcoustic(hidden_final, data_denom)
        sa.serialize(env.dataset("{}_sparse_acoustic_data.dump".format(source_id)))
        # Decode the hidden representation back to a waveform,
        # undo the normalisation, and resample to the source rate.
        out_final = restore_hidden(sess, rfilter_v.reshape(L, filters_num), hidden_final, k)
        out_final *= data_denom
        data_recov = lr.resample(out_final, target_sr, source_sr, scale=True)
        lr.output.write_wav(env.result("{}_recovery.wav".format(source_id)), data_recov, source_sr)
    # Checkpoint the model and dump the learned recovery filter.
    print "Saving in {}".format(saver.save(sess, model_fname))
    rfilter_v = sess.run(cm.recov_filter)
    # NOTE(review): np.save writes binary; mode "w" is only safe on Python 2 /
    # Unix -- would need "wb" if this file is ever ported to Python 3.
    np.save(open(env.run("recov_filter.pkl"), "w"), rfilter_v)
# else:
#     source_sr = 22000
#     src = env.run("nn_dream.dump")