def main():
    print('go to model')
    print '*' * 80

    spk_global_gen = prepare_data(mode='global', train_or_test='train')  # a fake data generator can be used to prototype the model first
    global_para = spk_global_gen.next()
    print global_para
    spk_all_list, dict_spk2idx, dict_idx2spk, mix_speech_len, speech_fre, total_frames, spk_num_total = global_para
    del spk_global_gen
    num_labels = len(spk_all_list)

    # data_generator=prepare_data('once','train')
    # data_generator=prepare_data_fake(train_or_test='train',num_labels=num_labels) # a fake data generator for prototyping the model
    # The order here is: mix_speechs.shape, mix_feas.shape, aim_fea.shape, aim_spkid.shape, query.shape
    # An example: (5, 17040) (5, 134, 129) (5, 134, 129) (5,) (5, 32, 400, 300, 3)
    # datasize=prepare_datasize(data_generator)
    # mix_speech_len,speech_fre,total_frames,spk_num_total,video_size=datasize
    print 'Begin to build the main model for the Multi_Modal Cocktail Problem.'
    # data=data_generator.next()

    # This part builds the 3D mixed-speech embedding maps.
    mix_hidden_layer_3d = MIX_SPEECH(speech_fre, mix_speech_len).cuda()
    mix_speech_classifier = MIX_SPEECH_classifier(speech_fre, mix_speech_len, num_labels).cuda()
    mix_speech_multiEmbedding = SPEECH_EMBEDDING(
        num_labels, config.EMBEDDING_SIZE, spk_num_total + config.UNK_SPK_SUPP).cuda()
    print mix_hidden_layer_3d
    print mix_speech_classifier
    # mix_speech_hidden=mix_hidden_layer_3d(Variable(torch.from_numpy(data[1])).cuda())

    # mix_speech_output=mix_speech_classifier(Variable(torch.from_numpy(data[1])).cuda())
    # Trick: with alpha=0 this selects the top_k; with a very large top_k it selects everything above alpha.
    # top_k_mask_mixspeech=top_k_mask(mix_speech_output,alpha=config.ALPHA,top_k=config.MAX_MIX)
    # top_k_mask_mixspeech=top_k_mask(mix_speech_output,alpha=config.ALPHA,top_k=3)
    # print top_k_mask_mixspeech
    # mix_speech_multiEmbs=mix_speech_multiEmbedding(top_k_mask_mixspeech) # bs*num_labels (max number of mixed speakers) x embedding size
    # mix_speech_multiEmbs=mix_speech_multiEmbedding(Variable(torch.from_numpy(top_k_mask_mixspeech),requires_grad=False).cuda()) # bs*num_labels (max number of mixed speakers) x embedding size

    # We need the attention between mix_speech_hidden [bs,len,fre,emb] and mix_mulEmbedding [bs,num_labels,EMB]:
    # expand the former to bs*num_labels (and likewise the latter), run them through the ATT function, then reshape back.
    # mix_speech_hidden_5d=mix_speech_hidden.view(config.BATCH_SIZE,1,mix_speech_len,speech_fre,config.EMBEDDING_SIZE)
    # mix_speech_hidden_5d=mix_speech_hidden_5d.expand(config.BATCH_SIZE,num_labels,mix_speech_len,speech_fre,config.EMBEDDING_SIZE).contiguous()
    # mix_speech_hidden_5d=mix_speech_hidden_5d.view(-1,mix_speech_len,speech_fre,config.EMBEDDING_SIZE)
    # att_speech_layer=ATTENTION(config.EMBEDDING_SIZE,'align').cuda()
    # att_multi_speech=att_speech_layer(mix_speech_hidden_5d,mix_speech_multiEmbs.view(-1,config.EMBEDDING_SIZE))
    # print att_multi_speech.size()
    # att_multi_speech=att_multi_speech.view(config.BATCH_SIZE,num_labels,mix_speech_len,speech_fre,-1)
    # print att_multi_speech.size()

    # This part conducts the video inputs.
    query_video_layer = VIDEO_QUERY(total_frames, config.VideoSize, spk_num_total).cuda()
    # print query_video_layer
    # query_video_output,xx=query_video_layer(Variable(torch.from_numpy(data[4])))

    # This part conducts the memory.
    # hidden_size=(config.HIDDEN_UNITS)
    hidden_size = config.EMBEDDING_SIZE
    memory = MEMORY(spk_num_total + config.UNK_SPK_SUPP, hidden_size)
    memory.register_spklist(spk_all_list)  # register spk_list into the empty memory

    # Memory function test.
    print 'memory all spkid:', memory.get_all_spkid()
    # print memory.get_image_num('Unknown_id')
    # print memory.get_video_vector('Unknown_id')
    # print memory.add_video('Unknown_id',Variable(torch.ones(300)))

    # This part tests the ATTENTION method from the query(~) to mix_speech.
    # x=torch.arange(0,24).view(2,3,4)
    # y=torch.ones([2,4])
    att_layer = ATTENTION(config.EMBEDDING_SIZE, 'align').cuda()
    att_speech_layer = ATTENTION(config.EMBEDDING_SIZE, 'align').cuda()
    # att=ATTENTION(4,'align')
    # mask=att(x,y) # bs*max_len

    # del data_generator
    # del data

    optimizer = torch.optim.Adam(
        [
            {'params': mix_hidden_layer_3d.parameters()},
            {'params': mix_speech_multiEmbedding.parameters()},
            {'params': mix_speech_classifier.parameters()},
            # {'params':query_video_layer.lstm_layer.parameters()},
            # {'params':query_video_layer.dense.parameters()},
            # {'params':query_video_layer.Linear.parameters()},
            {'params': att_layer.parameters()},
            {'params': att_speech_layer.parameters()},
            # ], lr=0.02,momentum=0.9)
        ],
        lr=0.0002)
    if 0 and config.Load_param:
        # query_video_layer.load_state_dict(torch.load('param_video_layer_19'))
        mix_speech_classifier.load_state_dict(
            torch.load('params/param_speech_multilabel_epoch249'))
    loss_func = torch.nn.MSELoss()  # the target label is NOT one-hot
    loss_multi_func = torch.nn.MSELoss()  # the target label is NOT one-hot
    # loss_multi_func = torch.nn.L1Loss()  # the target label is NOT one-hot
    loss_query_class = torch.nn.CrossEntropyLoss()

    print '''Begin to calculate.'''
    for epoch_idx in range(config.MAX_EPOCH):
        print_memory_state(memory.memory)
        for batch_idx in range(config.EPOCH_SIZE):
            print '*' * 40, epoch_idx, batch_idx, '*' * 40
            train_data_gen = prepare_data('once', 'train')
            train_data = train_data_gen.next()

            '''Mixed speech len, fre, Emb 3D representation layer'''
            mix_speech_hidden = mix_hidden_layer_3d(
                Variable(torch.from_numpy(train_data['mix_feas'])).cuda())
            # Temporarily disable the video part: the video data for s2/s3/s4 is incomplete for now.

            '''Speech self-separation part'''
            mix_speech_output = mix_speech_classifier(
                Variable(torch.from_numpy(train_data['mix_feas'])).cuda())

            # Get the ground-truth speaker names and vectors from the data.
            y_spk_list = [one.keys() for one in train_data['multi_spk_fea_list']]
            y_spk_gtruth, y_map_gtruth = multi_label_vector(y_spk_list, dict_spk2idx)

            # Optionally use the ground-truth separation result as the discrimination during training.
            if config.Ground_truth:
                mix_speech_output = Variable(torch.from_numpy(y_map_gtruth)).cuda()

            top_k_mask_mixspeech = top_k_mask(
                mix_speech_output, alpha=0.5, top_k=num_labels)  # a torch.Float tensor
            mix_speech_multiEmbs = mix_speech_multiEmbedding(
                top_k_mask_mixspeech)  # bs*num_labels (max number of mixed speakers) x embedding size

            # Compute the attention between mix_speech_hidden [bs,len,fre,emb] and mix_mulEmbedding [bs,num_labels,EMB]:
            # expand the former to bs*num_labels (and likewise the latter), run the ATT function, then reshape back.
            mix_speech_hidden_5d = mix_speech_hidden.view(
                config.BATCH_SIZE, 1, mix_speech_len, speech_fre, config.EMBEDDING_SIZE)
            mix_speech_hidden_5d = mix_speech_hidden_5d.expand(
                config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre,
                config.EMBEDDING_SIZE).contiguous()
            mix_speech_hidden_5d_last = mix_speech_hidden_5d.view(
                -1, mix_speech_len, speech_fre, config.EMBEDDING_SIZE)
            # att_speech_layer=ATTENTION(config.EMBEDDING_SIZE,'align').cuda()
            att_speech_layer = ATTENTION(config.EMBEDDING_SIZE, 'dot').cuda()
            att_multi_speech = att_speech_layer(
                mix_speech_hidden_5d_last,
                mix_speech_multiEmbs.view(-1, config.EMBEDDING_SIZE))
            # print att_multi_speech.size()
            att_multi_speech = att_multi_speech.view(
                config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre)  # bs, num_labels, len, fre
            # print att_multi_speech.size()
            multi_mask = att_multi_speech
            top_k_mask_mixspeech_multi = top_k_mask_mixspeech.view(
                config.BATCH_SIZE, num_labels, 1, 1).expand(
                    config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre)
            multi_mask = multi_mask * Variable(top_k_mask_mixspeech_multi).cuda()

            x_input_map = Variable(torch.from_numpy(train_data['mix_feas'])).cuda()
            # print x_input_map.size()
            x_input_map_multi = x_input_map.view(
                config.BATCH_SIZE, 1, mix_speech_len, speech_fre).expand(
                    config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre)
            predict_multi_map = multi_mask * x_input_map_multi
            if batch_idx % 100 == 0:
                print multi_mask
            # print predict_multi_map

            y_multi_map = np.zeros(
                [config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre],
                dtype=np.float32)
            batch_spk_multi_dict = train_data['multi_spk_fea_list']
            for idx, sample in enumerate(batch_spk_multi_dict):
                for spk in sample.keys():
                    y_multi_map[idx, dict_spk2idx[spk]] = sample[spk]
            y_multi_map = Variable(torch.from_numpy(y_multi_map)).cuda()

            loss_multi_speech = loss_multi_func(predict_multi_map, y_multi_map)

            # Loss term that pushes the channels to sum to 1; it should introduce more differentiation.
            y_sum_map = Variable(
                torch.ones(config.BATCH_SIZE, mix_speech_len, speech_fre)).cuda()
            predict_sum_map = torch.sum(predict_multi_map, 1)
            loss_multi_sum_speech = loss_multi_func(predict_sum_map, y_sum_map)
            loss_multi_speech = loss_multi_speech  # TODO: compare the sum-to-1 variant later; plain MSE already works quite well for now.
            # loss_multi_speech=loss_multi_speech+0.5*loss_multi_sum_speech

            if batch_idx == config.EPOCH_SIZE - 1:
                bss_eval(predict_multi_map, y_multi_map, y_map_gtruth,
                         dict_idx2spk, train_data)

            print 'training multi-abs norm this batch:', torch.abs(
                y_multi_map - predict_multi_map).norm().data.cpu().numpy()
            print 'loss:', loss_multi_speech.data.cpu().numpy()

            optimizer.zero_grad()  # clear gradients for the next train step
            loss_multi_speech.backward()  # backpropagation, compute gradients
            optimizer.step()  # apply gradients

            if 1 and epoch_idx > 20 and epoch_idx % 10 == 0 and batch_idx == config.EPOCH_SIZE - 1:
                torch.save(
                    mix_speech_multiEmbedding.state_dict(),
                    'params/param_mix_speech_emblayer_{}'.format(epoch_idx))
                torch.save(
                    mix_hidden_layer_3d.state_dict(),
                    'params/param_mix_speech_hidden3d_{}'.format(epoch_idx))
                torch.save(
                    att_speech_layer.state_dict(),
                    'params/param_mix_speech_attlayer_{}'.format(epoch_idx))

            # print 'Parameter history:'
            # for pa_gen in [{'params':mix_hidden_layer_3d.parameters()},
            #                {'params':mix_speech_multiEmbedding.parameters()},
            #                {'params':mix_hidden_layer_3d.parameters()},
            #                {'params':att_speech_layer.parameters()},
            #                {'params':att_layer.parameters()},
            #                {'params':mix_speech_classifier.parameters()},
            #                ]:
            #     print pa_gen['params'].next().data.cpu().numpy()[0]

            # The video-query branch below is currently skipped: the `continue`
            # runs on every batch, and the `1 / 0` is a guard in case it is removed.
            continue
            1 / 0

            '''Video-stimulus separation part'''
            # try:
            #     query_video_output,query_video_hidden=query_video_layer(Variable(torch.from_numpy(train_data[4])).cuda())
            # except RuntimeError:
            #     print 'RuntimeError here.'+'#'*30
            #     continue
            query_video_output, query_video_hidden = query_video_layer(
                Variable(torch.from_numpy(train_data[4])).cuda())

            if config.Comm_with_Memory:
                # TODO: re-check the query update here; it should be refactored into a function, it is a bit ugly now.
                aim_idx_FromVideoQuery = torch.max(query_video_output, dim=1)[1]  # returns the argmax
                aim_spk_batch = [
                    dict_idx2spk[int(idx.data.cpu().numpy())]
                    for idx in aim_idx_FromVideoQuery
                ]
                print 'Query class result:', aim_spk_batch, 'p:', query_video_output.data.cpu().numpy()

                for idx, aim_spk in enumerate(aim_spk_batch):
                    batch_vector = torch.stack([memory.get_video_vector(aim_spk)])
                    memory.add_video(aim_spk, query_video_hidden[idx])
                query_video_hidden = query_video_hidden + Variable(batch_vector)
                query_video_hidden = query_video_hidden / torch.sum(
                    query_video_hidden * query_video_hidden, 0)

            y_class = Variable(
                torch.from_numpy(
                    np.array([dict_spk2idx[spk] for spk in train_data['aim_spkname']])),
                requires_grad=False).cuda()
            print y_class
            loss_video_class = loss_query_class(query_video_output, y_class)

            mask = att_layer(mix_speech_hidden, query_video_hidden)  # bs*max_len*fre
            predict_map = mask * Variable(torch.from_numpy(train_data['mix_feas'])).cuda()
            y_map = Variable(torch.from_numpy(train_data['aim_fea'])).cuda()
            print 'training abs norm this batch:', torch.abs(
                y_map - predict_map).norm().data.cpu().numpy()
            loss_all = loss_func(predict_map, y_map)

            if 0 and config.Save_param:
                torch.save(query_video_layer.state_dict(),
                           'param_video_layer_19_forS1S5')
            if 0 and epoch_idx < 20:
                loss = loss_video_class
                if epoch_idx % 1 == 0 and batch_idx == config.EPOCH_SIZE - 1:
                    torch.save(query_video_layer.state_dict(),
                               'param_video_layer_19_forS1S5')
            else:
                # loss=loss_all+0.1*loss_video_class
                loss = loss_all

            optimizer.zero_grad()  # clear gradients for the next train step
            loss.backward(retain_graph=True)  # backpropagation, compute gradients
            optimizer.step()  # apply gradients
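
# ---------------------------------------------------------------------------
# All three mains in this file rely on a `top_k_mask` helper that is defined
# elsewhere in the repo. Based on the comment above ("with alpha=0 it selects
# the top_k; with a very large top_k it selects everything above alpha"), a
# minimal sketch of that behaviour could look like the function below. This is
# an illustrative reconstruction, not the repo's actual implementation, and
# the name `top_k_mask_sketch` is hypothetical.
def top_k_mask_sketch(output, alpha, top_k):
    # output: FloatTensor (or Variable) of shape [bs, num_labels] with
    # per-speaker activations. Returns a {0,1} FloatTensor mask of the same
    # shape: an entry is kept iff it is among the `top_k` largest in its row
    # AND its value exceeds `alpha`. With a very low alpha this reduces to
    # plain top-k selection; with top_k == num_labels it reduces to a
    # threshold at alpha.
    values = output.data if hasattr(output, 'data') else output
    _, topk_idx = values.topk(top_k, dim=1)   # indices of the k largest per row
    mask = values.new(values.size()).zero_()  # all-zero mask, same type/device
    mask.scatter_(1, topk_idx, 1.0)           # mark the top-k positions
    mask = mask * (values > alpha).float()    # additionally require > alpha
    return mask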
def main():
    print('go to model')
    print '*' * 80

    spk_global_gen = prepare_data(mode='global', train_or_test='train')  # a fake data generator can be used to prototype the model first
    global_para = spk_global_gen.next()
    print global_para
    spk_all_list, dict_spk2idx, dict_idx2spk, mix_speech_len, speech_fre, total_frames, spk_num_total = global_para
    del spk_global_gen
    num_labels = len(spk_all_list)

    # The order here is: mix_speechs.shape, mix_feas.shape, aim_fea.shape, aim_spkid.shape, query.shape
    # An example: (5, 17040) (5, 134, 129) (5, 134, 129) (5,) (5, 32, 400, 300, 3)
    print 'Begin to build the main model for the Multi_Modal Cocktail Problem.'

    mix_speech_class = MIX_SPEECH_classifier(speech_fre, mix_speech_len, num_labels).cuda()
    print mix_speech_class

    if 0 and config.Load_param:
        # para_name='param_speech_WSJ0_multilabel_epoch42'
        # para_name='param_speech_WSJ0_multilabel_epoch249'
        # para_name='param_speech_123_WSJ0_multilabel_epoch75'
        # para_name='param_speech_123_WSJ0_multilabel_epoch24'
        para_name = 'param_speech_123onezero_WSJ0_multilabel_epoch75'  # top-3 recall 80%
        para_name = 'param_speech_123onezeroag_WSJ0_multilabel_epoch80'  # 83.6
        para_name = 'param_speech_123onezeroag1_WSJ0_multilabel_epoch45'
        para_name = 'param_speech_123onezeroag2_WSJ0_multilabel_epoch40'
        para_name = 'param_speech_123onezeroag4_WSJ0_multilabel_epoch75'
        para_name = 'param_speech_123onezeroag3_WSJ0_multilabel_epoch40'
        para_name = 'param_speech_123onezeroag4_WSJ0_multilabel_epoch20'
        para_name = 'param_speech_4lstm_multilabelloss30map_epoch440'
        # mix_speech_class.load_state_dict(torch.load('params/param_speech_multilabel_epoch249'))
        mix_speech_class.load_state_dict(torch.load('params/{}'.format(para_name)))
        print 'Load Success:', para_name

    optimizer = torch.optim.Adam(
        [
            {'params': mix_speech_class.parameters()},
            # {'params':query_video_layer.lstm_layer.parameters()},
            # {'params':query_video_layer.dense.parameters()},
            # {'params':query_video_layer.Linear.parameters()},
            # {'params':att_layer.parameters()},
            # ], lr=0.02,momentum=0.9)
        ],
        lr=0.00001)
    # loss_func = torch.nn.KLDivLoss()  # the target label is NOT one-hot
    loss_func = torch.nn.MultiLabelSoftMarginLoss()  # the target label is NOT one-hot
    # loss_func = torch.nn.MSELoss()  # the target label is NOT one-hot
    # loss_func = torch.nn.CrossEntropyLoss()  # the target label is NOT one-hot
    # loss_func = torch.nn.MultiLabelMarginLoss()  # the target label is NOT one-hot
    # loss_func = torch.nn.L1Loss()  # the target label is NOT one-hot

    print '''Begin to calculate.'''
    for epoch_idx in range(config.MAX_EPOCH):
        if epoch_idx % 50 == 0:
            for ee in optimizer.param_groups:
                ee['lr'] /= 2
        acc_all, acc_line = 0, 0
        if epoch_idx > 0:
            print 'recall_rate this epoch {}: {}'.format(epoch_idx,
                                                         recall_rate_list.mean())
        recall_rate_list = np.array([])

        for batch_idx in range(config.EPOCH_SIZE):
            print '*' * 40, epoch_idx, batch_idx, '*' * 40
            train_data_gen = prepare_data('once', 'train')
            train_data = train_data_gen.next()
            mix_speech = mix_speech_class(
                Variable(torch.from_numpy(train_data['mix_feas'])).cuda())

            y_spk, y_map = multi_label_vector(train_data['multi_spk_fea_list'],
                                              dict_spk2idx)
            y_map = Variable(torch.from_numpy(y_map)).cuda()

            y_out_batch = mix_speech.data.cpu().numpy()
            acc1, acc2, all_num_batch, all_line_batch, recall_rate = count_multi_acc(
                y_out_batch, y_spk, alpha=-0.1, top_k_num=2)
            acc_all += acc1
            acc_line += acc2
            recall_rate_list = np.append(recall_rate_list, recall_rate)

            # print 'training abs norm this batch:',torch.abs(y_map-predict_map).norm().data.cpu().numpy()
            for i in range(config.BATCH_SIZE):
                # Besides the probabilities of the target speakers, also print the last few.
                print 'aim:{}-->{},predict:{}'.format(
                    train_data['multi_spk_fea_list'][i].keys(), y_spk[i],
                    mix_speech.data.cpu().numpy()[i][y_spk[i]])
                print 'last 5 probabilities:{}'.format(
                    mix_speech.data.cpu().numpy()[i][-5:])
            print '\nAcc for this batch: all elements({}) acc--{}, all samples({}) acc--{}, recall--{}'.format(
                all_num_batch, acc1, all_line_batch, acc2, recall_rate)
            # continue

            # if epoch_idx==0 and batch_idx<50:
            #     loss=loss_func(mix_speech,100*y_map)
            # else:
            #     loss=loss_func(mix_speech,y_map)
            # loss=loss_func(mix_speech,30*y_map)
            loss = loss_func(mix_speech, y_map)
            loss_sum = loss_func(mix_speech.sum(1), y_map.sum(1))
            print 'loss this batch:', loss.data.cpu().numpy(), loss_sum.data.cpu().numpy()
            print 'time:', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            # continue
            # loss=loss+0.2*loss_sum

            optimizer.zero_grad()  # clear gradients for the next train step
            loss.backward()  # backpropagation, compute gradients
            optimizer.step()  # apply gradients

            if config.Save_param and epoch_idx > 10 and epoch_idx % 5 == 0:
                try:
                    torch.save(
                        mix_speech_class.state_dict(),
                        'params/param_speech_123onezeroag5dropout_{}_multilabel_epoch{}'.format(
                            config.DATASET, epoch_idx))
                except:
                    print '\n\nSave paras failed ~! \n\n\n'

            # Print the params history; it shows the model trains well.
            # print 'Parameter history:'
            # for pa_gen in [{'params':mix_hidden_layer_3d.parameters()},
            #                {'params':query_video_layer.lstm_layer.parameters()},
            #                {'params':query_video_layer.dense.parameters()},
            #                {'params':query_video_layer.Linear.parameters()},
            #                {'params':att_layer.parameters()},
            #                ]:
            #     print pa_gen['params'].next()

        print 'Acc for this epoch: all elements acc--{}, all samples acc--{}'.format(
            acc_all / config.EPOCH_SIZE, acc_line / config.EPOCH_SIZE)
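
# ---------------------------------------------------------------------------
# `multi_label_vector` is also defined elsewhere in the repo; judging by how it
# is used above, it turns the per-sample speaker annotations into index lists
# plus a multi-hot float map for the multi-label loss. A self-contained sketch
# of that plausible behaviour follows (the `_sketch` name is hypothetical, and
# the real function may differ in detail):
import numpy as np

def multi_label_vector_sketch(spk_name_lists, dict_spk2idx):
    # spk_name_lists: list (length bs) of iterables of speaker names, e.g. the
    # .keys() of each sample's multi_spk_fea_list dict.
    # Returns (y_spk, y_map): per-sample lists of speaker indices, and a
    # [bs, num_labels] float32 multi-hot matrix with 1s at those indices.
    num_labels = len(dict_spk2idx)
    y_spk = [[dict_spk2idx[name] for name in names] for names in spk_name_lists]
    y_map = np.zeros([len(spk_name_lists), num_labels], dtype=np.float32)
    for row, idxs in enumerate(y_spk):
        for idx in idxs:
            y_map[row, idx] = 1.0
    return y_spk, y_map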
def main():
    print('go to model')
    print '*' * 80

    spk_global_gen = prepare_data(mode='global', train_or_test='train')  # a fake data generator can be used to prototype the model first
    global_para = spk_global_gen.next()
    print global_para
    spk_all_list, dict_spk2idx, dict_idx2spk, mix_speech_len, speech_fre, total_frames, spk_num_total = global_para
    del spk_global_gen
    num_labels = len(spk_all_list)
    print 'num_labels:', num_labels

    # data_generator=prepare_data('once','train')
    # data_generator=prepare_data_fake(train_or_test='train',num_labels=num_labels) # a fake data generator for prototyping the model
    # The order here is: mix_speechs.shape, mix_feas.shape, aim_fea.shape, aim_spkid.shape, query.shape
    # An example: (5, 17040) (5, 134, 129) (5, 134, 129) (5,) (5, 32, 400, 300, 3)
    # datasize=prepare_datasize(data_generator)
    # mix_speech_len,speech_fre,total_frames,spk_num_total,video_size=datasize
    print 'Begin to build the main model for the Multi_Modal Cocktail Problem.'
    # data=data_generator.next()

    # This part builds the 3D mixed-speech embedding maps.
    mix_hidden_layer_3d = MIX_SPEECH(speech_fre, mix_speech_len).cuda()
    mix_speech_classifier = MIX_SPEECH_classifier(speech_fre, mix_speech_len, num_labels).cuda()
    mix_speech_multiEmbedding = SPEECH_EMBEDDING(
        num_labels, config.EMBEDDING_SIZE, spk_num_total + config.UNK_SPK_SUPP).cuda()
    print mix_hidden_layer_3d
    print mix_speech_classifier
    # mix_speech_hidden=mix_hidden_layer_3d(Variable(torch.from_numpy(data[1])).cuda())

    # mix_speech_output=mix_speech_classifier(Variable(torch.from_numpy(data[1])).cuda())
    # Trick: with alpha=0 this selects the top_k; with a very large top_k it selects everything above alpha.
    # top_k_mask_mixspeech=top_k_mask(mix_speech_output,alpha=config.ALPHA,top_k=config.MAX_MIX)
    # top_k_mask_mixspeech=top_k_mask(mix_speech_output,alpha=config.ALPHA,top_k=3)
    # print top_k_mask_mixspeech
    # mix_speech_multiEmbs=mix_speech_multiEmbedding(top_k_mask_mixspeech) # bs*num_labels (max number of mixed speakers) x embedding size
    # mix_speech_multiEmbs=mix_speech_multiEmbedding(Variable(torch.from_numpy(top_k_mask_mixspeech),requires_grad=False).cuda()) # bs*num_labels (max number of mixed speakers) x embedding size

    # We need the attention between mix_speech_hidden [bs,len,fre,emb] and mix_mulEmbedding [bs,num_labels,EMB]:
    # expand the former to bs*num_labels (and likewise the latter), run them through the ATT function, then reshape back.
    # mix_speech_hidden_5d=mix_speech_hidden.view(config.BATCH_SIZE,1,mix_speech_len,speech_fre,config.EMBEDDING_SIZE)
    # mix_speech_hidden_5d=mix_speech_hidden_5d.expand(config.BATCH_SIZE,num_labels,mix_speech_len,speech_fre,config.EMBEDDING_SIZE).contiguous()
    # mix_speech_hidden_5d=mix_speech_hidden_5d.view(-1,mix_speech_len,speech_fre,config.EMBEDDING_SIZE)
    # att_speech_layer=ATTENTION(config.EMBEDDING_SIZE,'align').cuda()
    # att_multi_speech=att_speech_layer(mix_speech_hidden_5d,mix_speech_multiEmbs.view(-1,config.EMBEDDING_SIZE))
    # print att_multi_speech.size()
    # att_multi_speech=att_multi_speech.view(config.BATCH_SIZE,num_labels,mix_speech_len,speech_fre,-1)
    # print att_multi_speech.size()

    # This part conducts the video inputs.
    # query_video_layer=VIDEO_QUERY(total_frames,config.VideoSize,spk_num_total).cuda()
    query_video_layer = None
    # print query_video_layer
    # query_video_output,xx=query_video_layer(Variable(torch.from_numpy(data[4])))

    # This part conducts the memory.
    # hidden_size=(config.HIDDEN_UNITS)
    hidden_size = config.EMBEDDING_SIZE
    memory = MEMORY(spk_num_total + config.UNK_SPK_SUPP, hidden_size)
    memory.register_spklist(spk_all_list)  # register spk_list into the empty memory

    # Memory function test.
    print 'memory all spkid:', memory.get_all_spkid()
    # print memory.get_image_num('Unknown_id')
    # print memory.get_video_vector('Unknown_id')
    # print memory.add_video('Unknown_id',Variable(torch.ones(300)))

    # This part tests the ATTENTION method from the query(~) to mix_speech.
    # x=torch.arange(0,24).view(2,3,4)
    # y=torch.ones([2,4])
    att_layer = ATTENTION(config.EMBEDDING_SIZE, 'align').cuda()
    att_speech_layer = ATTENTION(config.EMBEDDING_SIZE, 'align').cuda()
    # att=ATTENTION(4,'align')
    # mask=att(x,y) # bs*max_len

    # del data_generator
    # del data

    optimizer = torch.optim.Adam(
        [
            {'params': mix_hidden_layer_3d.parameters()},
            {'params': mix_speech_multiEmbedding.parameters()},
            {'params': mix_speech_classifier.parameters()},
            # {'params':query_video_layer.lstm_layer.parameters()},
            # {'params':query_video_layer.dense.parameters()},
            # {'params':query_video_layer.Linear.parameters()},
            {'params': att_layer.parameters()},
            {'params': att_speech_layer.parameters()},
            # ], lr=0.02,momentum=0.9)
        ],
        lr=0.0002)
    if 1 and config.Load_param:
        # query_video_layer.load_state_dict(torch.load('param_video_layer_19'))
        # mix_speech_classifier.load_state_dict(torch.load('params/param_speech_multilabel_epoch249'))
        mix_speech_classifier.load_state_dict(
            torch.load('params/param_speech_WSJ0_multilabel_epoch249'))
        mix_hidden_layer_3d.load_state_dict(
            torch.load('params/param_mix101_WSJ0_hidden3d_180'))
        mix_speech_multiEmbedding.load_state_dict(
            torch.load('params/param_mix101_WSJ0_emblayer_180'))
        att_speech_layer.load_state_dict(
            torch.load('params/param_mix101_WSJ0_attlayer_180'))
    loss_func = torch.nn.MSELoss()  # the target label is NOT one-hot
    loss_multi_func = torch.nn.MSELoss()  # the target label is NOT one-hot
    # loss_multi_func = torch.nn.L1Loss()  # the target label is NOT one-hot
    loss_query_class = torch.nn.CrossEntropyLoss()

    print '''Begin to calculate.'''
    for epoch_idx in range(config.MAX_EPOCH):
        if epoch_idx > 0:
            print 'SDR_SUM (len:{}) for epoch {}: {}'.format(
                SDR_SUM.shape, epoch_idx - 1, SDR_SUM.mean())
        SDR_SUM = np.array([])
        # print_memory_state(memory.memory)
        # print 'SDR_SUM for epoch {}:{}'.format(epoch_idx - 1, SDR_SUM.mean())  # redundant: SDR_SUM was just reset
        for batch_idx in range(config.EPOCH_SIZE):
            print '*' * 40, epoch_idx, batch_idx, '*' * 40
            # train_data_gen=prepare_data('once','train')
            train_data_gen = prepare_data('once', 'test')
            # train_data_gen=prepare_data('once','eval_test')
            train_data = train_data_gen.next()
            # test_data_gen=prepare_data('once','test')
            # test_data=train_data_gen.next()

            '''Mixed speech len, fre, Emb 3D representation layer'''
            mix_speech_hidden = mix_hidden_layer_3d(
                Variable(torch.from_numpy(train_data['mix_feas'])).cuda())
            # Temporarily disable the video part: the video data for s2/s3/s4 is incomplete for now.

            '''Speech self-separation part'''
            mix_speech_output = mix_speech_classifier(
                Variable(torch.from_numpy(train_data['mix_feas'])).cuda())

            # Get the ground-truth speaker names and vectors from the data.
            # y_spk_list=[one.keys() for one in train_data['multi_spk_fea_list']]
            # y_spk_gtruth,y_map_gtruth=multi_label_vector(y_spk_list,dict_spk2idx)

            # Optionally use the ground-truth separation result as the discrimination during training.
            if 0 and config.Ground_truth:
                mix_speech_output = Variable(torch.from_numpy(y_map_gtruth)).cuda()
                if test_all_outputchannel:  # set the mask to all ones, to test outputting every channel
                    mix_speech_output = Variable(
                        torch.ones(config.BATCH_SIZE, num_labels))
                    y_map_gtruth = np.ones([config.BATCH_SIZE, num_labels])

            # top_k_mask_mixspeech=top_k_mask(mix_speech_output,alpha=0.5,top_k=num_labels) # a torch.Float tensor
            max_num_labels = 2
            top_k_mask_mixspeech = top_k_mask(
                mix_speech_output, alpha=-1, top_k=max_num_labels)  # a torch.Float tensor
            mix_speech_multiEmbs = mix_speech_multiEmbedding(
                top_k_mask_mixspeech)  # bs*num_labels (max number of mixed speakers) x embedding size

            # Compute the attention between mix_speech_hidden [bs,len,fre,emb] and mix_mulEmbedding [bs,num_labels,EMB]:
            # expand the former to bs*num_labels (and likewise the latter), run the ATT function, then reshape back.
            mix_speech_hidden_5d = mix_speech_hidden.view(
                config.BATCH_SIZE, 1, mix_speech_len, speech_fre, config.EMBEDDING_SIZE)
            mix_speech_hidden_5d = mix_speech_hidden_5d.expand(
                config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre,
                config.EMBEDDING_SIZE).contiguous()
            mix_speech_hidden_5d_last = mix_speech_hidden_5d.view(
                -1, mix_speech_len, speech_fre, config.EMBEDDING_SIZE)
            # att_speech_layer=ATTENTION(config.EMBEDDING_SIZE,'align').cuda()
            att_speech_layer = ATTENTION(config.EMBEDDING_SIZE, 'dot').cuda()
            att_multi_speech = att_speech_layer(
                mix_speech_hidden_5d_last,
                mix_speech_multiEmbs.view(-1, config.EMBEDDING_SIZE))
            # print att_multi_speech.size()
            att_multi_speech = att_multi_speech.view(
                config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre)  # bs, num_labels, len, fre
            # print att_multi_speech.size()
            multi_mask = att_multi_speech
            top_k_mask_mixspeech_multi = top_k_mask_mixspeech.view(
                config.BATCH_SIZE, num_labels, 1, 1).expand(
                    config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre)
            multi_mask = multi_mask * Variable(top_k_mask_mixspeech_multi).cuda()

            x_input_map = Variable(torch.from_numpy(train_data['mix_feas'])).cuda()
            # print x_input_map.size()
            x_input_map_multi = x_input_map.view(
                config.BATCH_SIZE, 1, mix_speech_len, speech_fre).expand(
                    config.BATCH_SIZE, num_labels, mix_speech_len, speech_fre)
            predict_multi_map = multi_mask * x_input_map_multi
            if batch_idx % 100 == 0:
                print multi_mask
            # print predict_multi_map

            bss_eval_fromGenMap(predict_multi_map, top_k_mask_mixspeech,
                                dict_idx2spk, train_data)
            SDR_SUM = np.append(SDR_SUM, bss_test.cal('batch_output/', 2))
            # Evaluation-only script: the `continue` skips the parameter update below.
            continue

            optimizer.zero_grad()  # clear gradients for the next train step
            loss_multi_speech.backward()  # backpropagation, compute gradients
            optimizer.step()  # apply gradients

            if 1 and epoch_idx > 20 and epoch_idx % 10 == 0 and batch_idx == config.EPOCH_SIZE - 1:
                torch.save(
                    mix_speech_multiEmbedding.state_dict(),
                    'params/param_mix_{}_emblayer_{}'.format(config.DATASET, epoch_idx))
                torch.save(
                    mix_hidden_layer_3d.state_dict(),
                    'params/param_mix_{}_hidden3d_{}'.format(config.DATASET, epoch_idx))
                torch.save(
                    att_speech_layer.state_dict(),
                    'params/param_mix_{}_attlayer_{}'.format(config.DATASET, epoch_idx))
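
# ---------------------------------------------------------------------------
# For reference, the mask-generation step that all three mains share can be
# summarised shape-by-shape. The sketch below mirrors the view/expand pattern
# used above, with a plain dot product and a sigmoid standing in for the
# repo's ATTENTION layer (whose exact scoring and normalisation may differ);
# `dot_attention_masks_sketch` is a hypothetical name.
import torch

def dot_attention_masks_sketch(hidden, spk_embs):
    # hidden:   [bs, len, fre, emb]  -- 3D embedding map of the mixture
    # spk_embs: [bs, num_labels, emb] -- one embedding per candidate speaker
    # returns:  [bs, num_labels, len, fre] -- one T-F mask per speaker
    bs, length, fre, emb = hidden.size()
    num_labels = spk_embs.size(1)
    h = hidden.view(bs, 1, length, fre, emb).expand(
        bs, num_labels, length, fre, emb)
    s = spk_embs.view(bs, num_labels, 1, 1, emb).expand(
        bs, num_labels, length, fre, emb)
    scores = (h * s).sum(4)        # dot product over the embedding dimension
    return torch.sigmoid(scores)   # squash to (0,1) masks

# Multiplying these masks with the input spectrogram expanded to
# [bs, num_labels, len, fre] yields one predicted magnitude map per speaker,
# exactly as `predict_multi_map = multi_mask * x_input_map_multi` does above.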