def build_from_gene(x, n_out_channel: int, seq: List[bool]):
    # The gene's integer value encodes the number of stacked hourglass modules.
    n_stack = geneop.cvtlstint(seq)
    f = 256
    # Work in (batch, feature, hop, channel) order inside the network.
    x = tf.transpose(x, (0, 2, 1, 3))
    cnv1 = cnv(x, [7, 7, 1, 64], 'cnv1', stride=[1, 1, 1, 1])
    cnv2 = cnv(cnv1, [3, 3, 64, 128], 'cnv2')
    #pool1 = pool(cnv2, 'pool1')
    cnv2b = cnv(cnv2, [3, 3, 128, 128], 'cnv2b')
    cnv3 = cnv(cnv2b, [3, 3, 128, 128], 'cnv3')
    cnv4 = cnv(cnv3, [3, 3, 128, f], 'cnv4')
    inter = cnv4
    preds = []
    for i in range(n_stack):
        hg = hourglass(inter, 4, f, i)
        cnv5 = cnv(hg, [3, 3, f, f], 'cnv5_%d' % i)
        cnv6 = cnv(cnv5, [1, 1, f, f], 'cnv6_%d' % i)
        # Per-stack prediction head (intermediate supervision output).
        preds += [cnv(cnv6, [1, 1, f, n_out_channel], 'out_%d' % i, dorelu=False)]
        if i < 3:
            # Merge the features and the prediction back into the trunk for the next stack.
            inter = (inter
                     + cnv(cnv6, [1, 1, f, f], 'tmp_%d' % i, dorelu=False)
                     + cnv(preds[-1], [1, 1, n_out_channel, f], 'tmp_out_%d' % i, dorelu=False))
    # Transpose every per-stack prediction back to (batch, hop, feature, channel).
    return [tf.transpose(v, (0, 2, 1, 3)) for v in preds]
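# Illustrative usage sketch (an assumption, not part of the original code): the gene's integer
# value encodes the stack count, the input is a (batch, hop, feature, 1) spectrogram batch, and
# one prediction per hourglass stack is returned; the last stack's output is typically used at
# inference.  The placeholder shape and the 8-bit gene width below are hypothetical stand-ins.
def _demo_build_from_gene():
    p_feature = tf.compat.v1.placeholder(
        tf.float32, shape=(4, 64, 512, 1), name='x_mixed')   # (batch, hop, feature, 1)
    gene = geneop.cvtintlst(4, 8)                             # hypothetical 8-bit gene, n_stack = 4
    preds = build_from_gene(p_feature, 2, gene)               # 2 channels: instrumental / vocal
    return preds[-1]                                          # last-stack prediction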
def eval_fn(gene_list: List[List[bool]]):
    # Query the cache first: genes are keyed by their integer encoding,
    # grouped per worker type / worker configuration.
    container_key = (cfg.worker_type, dict_to_tuple(cfg.worker_config))
    result_container = self.worker_cache[container_key]
    query_result = [
        result_container.get(geneop.cvtlstint(gene), None) for gene in gene_list
    ]
    for result in query_result:
        if result is not None:
            glob.print_status("EVAL", "score=%s (cached)" % (str(result), ))
    missed_gene_list = [
        gene for gene, result in zip(gene_list, query_result) if result is None
    ]
    missed_gene_idx_list = [
        i for i, result in enumerate(query_result) if result is None
    ]
    if missed_gene_list:
        # Send the cache misses to the workers and wait for their results.
        glob.task_input_queue.put(missed_gene_list)
        worker_result_list = glob.task_output_queue.get()
        glob.incoming_population.clear()
        # Combine the worker results with the cached ones and update the cache.
        for idx, gene, result in zip(missed_gene_idx_list, missed_gene_list,
                                     worker_result_list):
            query_result[idx] = result
            result_container[geneop.cvtlstint(gene)] = result
        # Persist the cache to disk.
        if cfg.worker_cache_path is not None:
            if cfg.worker_cache_backup_path is not None and os.path.exists(
                    cfg.worker_cache_path):
                # Back up the old cache first; assume filesystem metadata operations are atomic.
                if os.path.exists(cfg.worker_cache_backup_path):
                    os.remove(cfg.worker_cache_backup_path)
                os.rename(cfg.worker_cache_path, cfg.worker_cache_backup_path)
            with open(cfg.worker_cache_path, "wb") as f:
                pickle.dump(self.worker_cache, f)
    # Select the requested objectives plus the validation score for every gene.
    out = tuple(
        tuple(result[idx] for idx in cfg.result_needed) for result in query_result)
    out_valid = tuple(result[cfg.valid_idx] for result in query_result)
    return out, out_valid
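# dict_to_tuple is used above but not defined in this excerpt.  A minimal sketch of the kind of
# helper assumed here (name and behaviour are assumptions): flatten the worker-config dict into
# a sorted tuple so it can serve as part of a hashable cache key.
def _dict_to_tuple_sketch(d):
    return tuple(sorted(d.items()))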
def do(i_gen, path):
    import evo_g1 as g1
    evo = g1.EvoCore()
    evo.load(path)
    # Sort the population by test score (or by the validation value) in descending order.
    evo.population_genome.sort(
        key=lambda x: x[1][0] if score_type == "test" else x[2], reverse=True)
    a = "%04d" % (i_gen, ) if i_gen != -1 else "init"
    b = "%d" % (i_gen, ) if i_gen != -1 else "INIT"
    c = {"mir2": "MIR", "dsd2": "DSD", "mus2": "MUS"}[dataset_type]
    # Print the selected genomes (population indices 0, 1 and 3 after sorting).
    print("v1 g1 %s %d gen_%s_1 S-%s-1-%s" %
          (dataset_type, geneop.cvtlstint(evo.population_genome[0][0]), a, b, c))
    print("v1 g1 %s %d gen_%s_2 S-%s-2-%s" %
          (dataset_type, geneop.cvtlstint(evo.population_genome[1][0]), a, b, c))
    print("v1 g1 %s %d gen_%s_3 S-%s-3-%s" %
          (dataset_type, geneop.cvtlstint(evo.population_genome[3][0]), a, b, c))
def boundary():
    # Hand-built gene fragments used as boundary cases for the gene comparison logic.
    cb = geneop.cvtintlst(0b0, 1)
    ccg_res = geneop.cvtintlst(0b1, 1) + cb + cb
    cg_res_64 = geneop.cvtintlst(0b11, 2) + ccg_res
    cg_pass = geneop.cvtintlst(0b00, 2) + ccg_res
    pb_1x16 = geneop.cvtintlst(0b00_11, 4)
    pb_pass = geneop.cvtintlst(0b00_00, 4)
    rb_a = pb_1x16 + geneop.cvtintlst(0b11, 2) + ccg_res
    rb_pass = pb_pass + geneop.cvtintlst(0b11, 2) + ccg_res
    b_a = cg_res_64 + rb_a + rb_pass + ccg_res
    b_b = cg_res_64 + rb_pass + rb_a + ccg_res
    # Building the same gene twice must yield the same network description.
    assert b_from_gene(None, 128, b_a) == b_from_gene(None, 128, b_a)
    v4m0 = geneop.cvtintlst(0b11_0_00_000_0000, 12) + b_a + b_a + b_a + b_a + b_a
    v4m0_b = geneop.cvtintlst(0b11_0_00_000_0000, 12) + b_b + b_a + b_b + b_a + b_b
    v4m0_c = geneop.cvtintlst(0b11_0_01_001_0001, 12) + b_b + b_a + b_b + b_a + b_b
    # cmp_gene must treat b_a and b_b (same sub-blocks, different order) as equivalent,
    # but must notice a changed 12-bit header.
    assert cmp_gene(v4m0, v4m0_b)
    assert not cmp_gene(v4m0, v4m0_c)
    print(geneop.cvtlstint(v4m0))
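# cvtintlst / cvtlstint are the gene <-> integer converters used throughout.  A minimal sketch
# of the assumed semantics (fixed-width, most-significant-bit-first bool list, round-trip safe);
# the real geneop implementation may differ, e.g. in bit order.
def _cvtintlst_sketch(value, n_bit):
    return [bool((value >> (n_bit - 1 - i)) & 1) for i in range(n_bit)]

def _cvtlstint_sketch(bits):
    out = 0
    for b in bits:
        out = (out << 1) | int(b)
    return out

assert _cvtlstint_sketch(_cvtintlst_sketch(0b00_11, 4)) == 0b00_11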
def eval_gene_core(gene):
    import time
    import random
    import tensorflow as tf
    import numpy as np
    import librosa
    from eval_util import bss_eval_sdr
    print(" :GENE: %d" % (geneop.cvtlstint(gene), ))
    n_feature = cfg.n_feature
    tf.compat.v1.reset_default_graph()
    graph = tf.Graph()
    t = time.time()
    with graph.as_default():
        random.seed(0x41526941)
        np.random.seed(0x41526941)
        tf.compat.v1.random.set_random_seed(0x41526941)
        sess_conf = tf.compat.v1.ConfigProto(
            gpu_options=tf.compat.v1.GPUOptions(
                allow_growth=True, per_process_gpu_memory_fraction=1.0),
            allow_soft_placement=True,
        )
        with tf.compat.v1.Session(config=sess_conf) as sess:
            # TRAIN
            p_feature = tf.compat.v1.placeholder(
                tf.float32,
                shape=(cfg.batch_size, cfg.n_hop_per_sample, n_feature, 1),
                name='x_mixed')
            p_target = tf.compat.v1.placeholder(
                tf.float32,
                shape=(cfg.batch_size, cfg.n_hop_per_sample, n_feature, n_out_channel),
                name='y_mixed')
            v_pred = geneop.build_from_gene(p_feature, n_out_channel, gene)
            n_param = netop.count_parameter()
            print(" :Total {:,} parameters".format(n_param))
            if "neg_gflops" in cfg.result_format:
                n_forward_flop = tf.compat.v1.profiler.profile(
                    graph,
                    options=tf.compat.v1.profiler.ProfileOptionBuilder.
                    float_operation()).total_float_ops
                print(" :Forward pass needs {:,} FLOPS".format(n_forward_flop))
            # The network predicts soft masks; multiply by the mixture magnitude to get spectrograms.
            v_pred_clipped = tf.clip_by_value(v_pred, 0.0, 1.0) * p_feature
            v_loss = tf.reduce_mean(input_tensor=tf.abs(v_pred * p_feature - p_target))
            v_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="step")
            p_lr_fac = tf.compat.v1.placeholder(tf.float32, name="p_lr_fac")
            v_lr = p_lr_fac * tf.compat.v1.train.cosine_decay_restarts(
                cfg.max_lr, v_step, cfg.first_lr_period,
                alpha=cfg.min_lr / cfg.max_lr, t_mul=2.0)
            op_optim = tf.compat.v1.train.AdamOptimizer(
                learning_rate=v_lr).minimize(v_loss, global_step=v_step)
            sess.run(tf.compat.v1.global_variables_initializer())
            loss_list = []
            data_feature = np.zeros(
                (cfg.batch_size, cfg.n_hop_per_sample, n_feature, 1))
            data_target = np.zeros(
                (cfg.batch_size, cfg.n_hop_per_sample, n_feature, n_out_channel))
            seg_idx_list = np.array([])
            for i_step in range(cfg.n_step):
                # Assemble a batch of random training excerpts.
                for i_batch in range(cfg.batch_size):
                    if seg_idx_list.size == 0:
                        seg_idx_list = np.random.permutation(len(train_seg_list))
                    idx = seg_idx_list[0]
                    seg_idx_list = seg_idx_list[1:]
                    spec_mixed, spec_vocal, spec_inst = train_seg_list[idx]
                    start_idx = np.random.randint(
                        0, len(spec_mixed) - cfg.n_hop_per_sample)
                    data_feature[i_batch, :, :, 0] = \
                        spec_mixed[start_idx:start_idx + cfg.n_hop_per_sample, :]
                    # Channel 0 is the instrumental target, channel 1 the vocal target.
                    data_target[i_batch, :, :, 0] = \
                        spec_inst[start_idx:start_idx + cfg.n_hop_per_sample, :]
                    data_target[i_batch, :, :, 1] = \
                        spec_vocal[start_idx:start_idx + cfg.n_hop_per_sample, :]
                if i_step <= cfg.warmup_period:
                    # Slow start so that large early updates do not blow up the weights.
                    lr_fac = cfg.warmup_fac
                else:
                    lr_fac = 1.0
                loss_value, _ = sess.run(
                    [v_loss, op_optim],
                    feed_dict={
                        p_feature: data_feature,
                        p_target: data_target,
                        p_lr_fac: lr_fac
                    })
                loss_list.append(loss_value)
            # EVAL
            sdr_list = []
            valid_sdr_list = []
            ret_list = []
            for i_eval, (real_vocal, real_inst, magn_orig_list, phase_list,
                         norm_fac) in enumerate(eval_seg_list):
                n_hop, _ = magn_orig_list.shape
                magn_inst_list = np.zeros_like(magn_orig_list, dtype=np.float32)
                magn_vocal_list = np.zeros_like(magn_orig_list, dtype=np.float32)
                data_feature = np.zeros(
                    (cfg.batch_size, cfg.n_hop_per_sample, n_feature, 1),
                    dtype=np.float32)
                batch_hop_list = []

                def flush_buffer():
                    # Run one batch of windows and write the kept frames of each
                    # prediction back into the full-track magnitude buffers.
                    pred_value, = sess.run([v_pred_clipped],
                                           feed_dict={p_feature: data_feature})
                    for i_batch, (i_batch_hop, offset_begin,
                                  offset_end) in enumerate(batch_hop_list):
                        magn_inst_list[i_batch_hop + offset_begin:i_batch_hop +
                                       offset_end, :-1] = pred_value[
                                           i_batch, offset_begin:offset_end, :, 0]
                        magn_vocal_list[i_batch_hop + offset_begin:i_batch_hop +
                                        offset_end, :-1] = pred_value[
                                            i_batch, offset_begin:offset_end, :, 1]
                    data_feature.fill(0.0)
                    batch_hop_list.clear()

                def enqueue_buffer(data, i_batch_hop, offset_begin, offset_end):
                    if len(batch_hop_list) == cfg.batch_size:
                        flush_buffer()
                    i_batch = len(batch_hop_list)
                    data_feature[i_batch, :data.shape[0], :, 0] = data
                    batch_hop_list.append((i_batch_hop, offset_begin, offset_end))

                # Slide 50%-overlapped windows over the track; the top frequency bin is
                # excluded from the network input and output.
                i_hop = 0
                while i_hop + cfg.n_hop_per_sample < n_hop:
                    data = magn_orig_list[i_hop:i_hop + cfg.n_hop_per_sample, :-1]
                    if i_hop == 0:
                        enqueue_buffer(data, i_hop, 0, cfg.n_hop_per_sample * 3 // 4)
                    else:
                        enqueue_buffer(data, i_hop, cfg.n_hop_per_sample // 4,
                                       cfg.n_hop_per_sample * 3 // 4)
                    i_hop += cfg.n_hop_per_sample // 2
                data = magn_orig_list[i_hop:, :-1]
                enqueue_buffer(data, i_hop, cfg.n_hop_per_sample // 4, n_hop - i_hop)
                flush_buffer()
                # Reconstruct time-domain signals using the mixture phase.
                unit_magn = np.exp(1j * phase_list)
                fake_inst = librosa.istft(
                    (magn_inst_list * unit_magn * norm_fac).T,
                    hop_length=cfg.hop_size)
                fake_vocal = librosa.istft(
                    (magn_vocal_list * unit_magn * norm_fac).T,
                    hop_length=cfg.hop_size)
                if (fake_inst <= 1e-8).all() or (fake_vocal <= 1e-8).all():
                    # Degenerate (silent) output: give it a prohibitively bad score.
                    sdr_list.append(-999999)
                else:
                    #saveWav("fakeinst.wav", fake_inst, cfg.work_sr)
                    #saveWav("fakevocal.wav", fake_vocal, cfg.work_sr)
                    ret_list.append(
                        cfg.pool.apply_async(bss_eval_sdr, (
                            np.array([real_inst], dtype=np.float32),
                            np.array([fake_inst], dtype=np.float32),
                        )))
                    ret_list.append(
                        cfg.pool.apply_async(bss_eval_sdr, (
                            np.array([real_vocal], dtype=np.float32),
                            np.array([fake_vocal], dtype=np.float32),
                        )))
            # Collect the asynchronous BSS-Eval results (instrumental / vocal pairs).
            ret_list = [x.get()[0] for x in ret_list]
            for i_eval, sdr in enumerate(zip(ret_list[::2], ret_list[1::2])):
                mean_sdr = np.mean(sdr)
                if i_eval < cfg.n_eval:
                    sdr_list.append(mean_sdr)
                else:
                    valid_sdr_list.append(mean_sdr)
            result_list = []
            for result_type in cfg.result_format:
                if result_type == "sdr":
                    result_list.append(np.mean(sdr_list))
                elif result_type == "neg_mega_pc":
                    result_list.append(-n_param / 1_000_000.0)
                elif result_type == "neg_gflops":
                    result_list.append(-n_forward_flop / 1_000_000_000.0)
                elif result_type == "valid_sdr":
                    result_list.append(np.mean(valid_sdr_list))
                else:
                    raise ValueError(
                        "Unsupported result_type `%s`" % (result_type, ))
            print(" EVAL RESULT: t=%.2f, train_loss=%.09f, result=%r" %
                  (time.time() - t, np.mean(loss_list), result_list))
            return result_list
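# The evaluation loop above tiles each track with 50%-overlapped windows and keeps only part of
# every prediction: the first window keeps its first three quarters, later windows keep their
# middle half, and the tail window keeps everything after its first quarter.  A standalone sketch
# (assuming a window length divisible by 4 and a track longer than one window) checking that this
# scheme writes every frame exactly once:
def _check_window_tiling(n_hop, n_win=16):
    covered = [0] * n_hop
    i_hop = 0
    while i_hop + n_win < n_hop:
        begin = 0 if i_hop == 0 else n_win // 4
        end = n_win * 3 // 4
        for j in range(i_hop + begin, i_hop + end):
            covered[j] += 1
        i_hop += n_win // 2
    # Tail window: keep from the first quarter to the end of the track.
    for j in range(i_hop + n_win // 4, n_hop):
        covered[j] += 1
    return all(c == 1 for c in covered)

assert _check_window_tiling(100)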
def do(i_gen, path):
    import evo_nsga2 as nsga2
    evo = nsga2.EvoCore()
    evo.load(path)
    sdr_list = np.asarray([
        score[0] if score_type == "test" else v
        for gene, score, v in evo.population_genome
    ])
    eval_sdr_list = np.asarray(
        [score[0] for gene, score, v in evo.population_genome])
    param_list = np.asarray(
        [-score[1] for gene, score, v in evo.population_genome])
    n = len(param_list)
    idx_list = np.argsort(param_list)
    i_fast = np.argmin(param_list)
    #i_p25 = np.argmin(np.abs(param_list - rec_p25))
    #i_p50 = np.argmin(np.abs(param_list - rec_p50))
    #i_p75 = np.argmin(np.abs(param_list - rec_p75))
    i_p25 = idx_list[n // 4]
    i_p33 = idx_list[n // 3]
    i_p50 = idx_list[n // 2]
    i_p66 = idx_list[n * 2 // 3]
    i_p75 = idx_list[n * 3 // 4]
    i_p90 = idx_list[n * 9 // 10]
    i_sdr = np.argmax(sdr_list)
    i_eval_sdr = np.argmax(eval_sdr_list)
    p25_sdr_list.append(sdr_list[i_p25])
    p33_sdr_list.append(sdr_list[i_p33])
    p50_sdr_list.append(sdr_list[i_p50])
    p66_sdr_list.append(sdr_list[i_p66])
    p75_sdr_list.append(sdr_list[i_p75])
    p90_sdr_list.append(sdr_list[i_p90])
    if i_gen in (99, 50, 25, 1):
        a = "%04d" % (i_gen, ) if i_gen != -1 else "init"
        b = "%d" % (i_gen, ) if i_gen != -1 else "INIT"
        c = {"mir2": "MIR", "dsd2": "DSD", "mus2": "MUS"}[dataset_type]
        if i_gen == 99 or (i_gen == 50 and dataset_type == "mus2"):
            print("v1 nsga2 %s %d gen_%s_fast M-%s-1-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_fast][0]), a, b, c))
            print("v1 nsga2 %s %d gen_%s_p25 M-%s-2-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p25][0]), a, b, c))
            print("v1 nsga2 %s %d gen_%s_p33 M-%s-3-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p33][0]), a, b, c))
            print("v1 nsga2 %s %d gen_%s_p50 M-%s-4-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p50][0]), a, b, c))
            print("v1 nsga2 %s %d gen_%s_p66 M-%s-5-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p66][0]), a, b, c))
            print("v1 nsga2 %s %d gen_%s_p75 M-%s-6-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p75][0]), a, b, c))
            print("v1 nsga2 %s %d gen_%s_p90 M-%s-7-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p90][0]), a, b, c))
            print("v1 nsga2 %s %d gen_%s_sdr M-%s-8-%s" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_sdr][0]), a, b, c))
            #print("v1 nsga2 %s %d gen_%s_eval_sdr M-%s-X-%s" % (dataset_type, geneop.cvtlstint(evo.population_genome[i_eval_sdr][0]), a, b, c))
            print("")
        else:
            print("v1 nsga2 %s %d gen_%s_fast" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_fast][0]), a))
            print("v1 nsga2 %s %d gen_%s_p25" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p25][0]), a))
            print("v1 nsga2 %s %d gen_%s_p33" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p33][0]), a))
            print("v1 nsga2 %s %d gen_%s_p50" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p50][0]), a))
            print("v1 nsga2 %s %d gen_%s_p66" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p66][0]), a))
            print("v1 nsga2 %s %d gen_%s_p75" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p75][0]), a))
            print("v1 nsga2 %s %d gen_%s_p90" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_p90][0]), a))
            print("v1 nsga2 %s %d gen_%s_sdr" %
                  (dataset_type,
                   geneop.cvtlstint(evo.population_genome[i_sdr][0]), a))
            print("")