def tempos():
    """Update the global tempo pair from the request body and re-run interpolation.

    Expects a JSON payload (dict, or a JSON-encoded string) with keys
    'tempo_1' and 'tempo_2'. Stores them in the global TEMPO array,
    re-runs the model on the currently selected songs (SONGNAME), and
    returns the serialized interpolation result as a JSON HTTP response.
    """
    global TEMPO
    global SONGNAME
    r = request.json
    # request.json may already be a dict, or a raw JSON string depending on
    # the client's Content-Type header; normalize to a dict.
    # (fixed: was `type(r) == dict` — isinstance is the idiomatic type check)
    if isinstance(r, dict):
        r_json = r
    else:
        r_json = json.loads(r)
    t1 = r_json['tempo_1']
    t2 = r_json['tempo_2']
    tempo_1 = np.asarray(t1)
    tempo_2 = np.asarray(t2)
    TEMPO = np.array([tempo_1, tempo_2])
    song1 = path + SONGNAME[0]  # heyjude
    song2 = path + SONGNAME[1]  # someonelikeyou
    m, c, tempo = model.load_midi(song1, song2, UNIT_LEN)
    m_seq, c_seq, z = model.interp_sample(vae, m, c, INTERP_NUM, RHYTHM_THRESHOLD)
    response_pickled = numpy2json(m_seq, c_seq, z, TEMPO, SONGNAME, RHYTHM_THRESHOLD)
    return Response(response=response_pickled, status=200, mimetype="application/json")
def static_twosong(s1, s2, num):
    """Interpolate between two songs chosen by index and return the result as JSON.

    s1, s2 -- indices (convertible to int) into the global songfiles list.
    num    -- number of interpolation groups to generate.

    Side effects: updates the globals INTERP_NUM, TEMPO and SONGNAME so that
    later requests (tempo/threshold updates) operate on this song pair.
    """
    global UNIT_LEN
    global INTERP_NUM
    global TOTAL_LEN
    global path
    global RHYTHM_THRESHOLD
    global TEMPO
    global SONGNAME
    # Inference only — no gradients needed.
    with torch.no_grad():
        INTERP_NUM = num  # number of interpolation groups requested by the caller
        first = songfiles[int(s1)]
        second = songfiles[int(s2)]
        song1 = path + first
        song2 = path + second
        m, c, tempo = model.load_midi(song1, song2, UNIT_LEN)
        m_seq, c_seq, z = model.interp_sample(vae, m, c, INTERP_NUM, RHYTHM_THRESHOLD)
        print("z", np.shape(z))
        # Remember tempo and song names for subsequent /tempos and /theta calls.
        TEMPO = tempo
        SONGNAME = np.array([first, second])
        payload = numpy2json(m_seq, c_seq, z, TEMPO, SONGNAME, RHYTHM_THRESHOLD)
        return Response(response=payload, status=200, mimetype="application/json")
def theta():
    """Update the global rhythm threshold from the request and recompute interpolation.

    Expects a JSON payload (dict, or a JSON-encoded string) with key 'theta'.
    Stores it in the global RHYTHM_THRESHOLD, re-runs the model on the current
    song pair (SONGNAME), and returns the serialized result as JSON.
    """
    global RHYTHM_THRESHOLD
    global SONGNAME
    r = request.json
    # request.json may already be a dict, or a raw JSON string depending on
    # the client's Content-Type header; normalize to a dict.
    # (fixed: was `type(r) == dict` — isinstance is the idiomatic type check)
    if isinstance(r, dict):
        r_json = r
    else:
        r_json = json.loads(r)
    theta_temp = r_json['theta']
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24, raising
    # AttributeError on modern NumPy. It was an alias of the builtin float,
    # which is the documented drop-in replacement.
    theta = float(theta_temp)
    with torch.no_grad():
        RHYTHM_THRESHOLD = theta
        song1 = path + SONGNAME[0]  # heyjude
        song2 = path + SONGNAME[1]  # someonelikeyou
        m, c, tempo = model.load_midi(song1, song2, UNIT_LEN)
        m_seq, c_seq, z = model.interp_sample(vae, m, c, INTERP_NUM, RHYTHM_THRESHOLD)
        response_pickled = numpy2json(m_seq, c_seq, z, TEMPO, SONGNAME, RHYTHM_THRESHOLD)
        return Response(response=response_pickled, status=200, mimetype="application/json")
def static():
    """Interpolate between the two default demo songs and return the result as JSON.

    Side effects: updates the globals TEMPO and SONGNAME so later requests
    (tempo/threshold updates) operate on this default song pair.
    """
    with torch.no_grad():
        global UNIT_LEN
        global INTERP_NUM
        global TOTAL_LEN
        global path
        global RHYTHM_THRESHOLD
        global TEMPO
        global SONGNAME
        song1 = path + songfiles[1]  # heyjude
        song2 = path + songfiles[2]  # someonelikeyou
        m, c, tempo = model.load_midi(song1, song2, UNIT_LEN)
        # FIX: model.interp_sample returns three values (m_seq, c_seq, z) — as
        # used by every other handler in this file — so the previous two-name
        # unpacking raised ValueError, and numpy2json was called without the z
        # argument that the other call sites pass.
        m_seq, c_seq, z = model.interp_sample(vae, m, c, INTERP_NUM, RHYTHM_THRESHOLD)
        TEMPO = tempo
        SONGNAME = np.array([songfiles[1], songfiles[2]])
        response_pickled = numpy2json(m_seq, c_seq, z, TEMPO, SONGNAME, RHYTHM_THRESHOLD)
        return Response(response=response_pickled, status=200, mimetype="application/json")