def main(exp, frame_sizes, generate_from, **params):
    """Load a trained SampleRNN checkpoint and generate audio samples.

    Args:
        exp: Experiment identifier, forwarded to the generator's epoch hook.
        frame_sizes: Frame sizes for the SampleRNN hierarchy.
        generate_from: Path of the saved checkpoint to load.
        **params: Overrides merged on top of ``default_params`` (must supply
            'generate_to', 'n_samples', 'sample_length', 'sample_rate',
            'nb_classes', 'cond', 'cuda', and the model hyperparameters).
    """
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  generate_from=generate_from,
                  **params)

    # Rebuild the model with the same hyperparameters used during training.
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        nb_classes=params['nb_classes'],
        weight_norm=params['weight_norm'],
    )

    print('Loading saved model: ' + params['generate_from'])
    checkpoint = torch.load(params['generate_from'])

    # The training wrapper saves weights under a "model." prefix; strip it so
    # the keys line up with this bare SampleRNN instance. Using replace()
    # (instead of a fixed k[6:] slice) also leaves unprefixed keys intact,
    # matching the other generation entry point in this file.
    temporary_dict = {}
    for k, v in checkpoint.items():
        temporary_dict[k.replace("model.", "")] = v
    checkpoint = temporary_dict
    model.load_state_dict(checkpoint)

    # makedirs(exist_ok=True) also creates missing parents and avoids the
    # exists()/mkdir() race of the previous code.
    os.makedirs(params['generate_to'], exist_ok=True)

    print(params['cond'])
    generator = GeneratorPlugin(params['generate_to'], params['n_samples'],
                                params['sample_length'], params['sample_rate'],
                                params['nb_classes'], params['cond'])
    # Only move the model to the GPU when CUDA is actually requested;
    # the unconditional model.cuda() crashed on CPU-only machines.
    generator.register_generate(
        model.cuda() if params['cuda'] else model, params['cuda'])
    generator.epoch(exp)
def main(checkpoint, **args):
    """Generate audio from a trained checkpoint, optionally syncing via GCS.

    Args:
        checkpoint: Path to the saved model state dict (may be preloaded
            from a Google Cloud Storage bucket when ``args['bucket']`` is set).
        **args: Run options; must include 'bucket' and 'cuda', may include
            'debug' plus overrides for the default hyperparameters below.
    """
    task_id = setup_logging(
        'gen',
        logging.NOTSET if args.get('debug', False) else logging.INFO)

    # Defaults for the model/generation hyperparameters; caller-supplied
    # **args override them (dict(defaults, **args) semantics).
    params = dict(
        {
            'n_rnn': 3,
            'dim': 1024,
            'learn_h0': False,
            'q_levels': 256,
            'weight_norm': True,
            'frame_sizes': [16, 16, 4],
            'sample_rate': 16000,
            'n_samples': 1,
            'sample_length': 16000 * 60 * 4,  # 4 minutes of 16 kHz audio
            'sampling_temperature': 1,
            'q_method': QMethod.LINEAR,
        },
        exp=checkpoint,
        **args)
    logging.info(str(params))
    logging.info('booting')

    # dataset = storage_client.list_blobs(bucket, prefix=path)
    # for blob in dataset:
    #     blob.download_to_filename(blob.name)
    bucket = None
    if args['bucket']:
        # Pull the checkpoint down from Google Cloud Storage before loading.
        logging.debug('setup google storage bucket {}'.format(args['bucket']))
        storage_client = storage.Client()
        bucket = Bucket(storage_client, args['bucket'])
        preload_checkpoint(checkpoint, storage_client, bucket)

    # Results land two directories above the checkpoint, in a per-task folder.
    results_path = os.path.abspath(
        os.path.join(checkpoint, os.pardir, os.pardir, task_id))
    ensure_dir_exists(results_path)

    checkpoint = os.path.abspath(checkpoint)
    # NOTE: the lambda's `storage` parameter is torch's storage object and
    # shadows the google.cloud `storage` module inside the lambda only.
    tmp_pretrained_state = torch.load(
        checkpoint,
        map_location=lambda storage, loc: storage.cuda(0)
        if args['cuda'] else storage)

    # Load all tensors onto GPU 1
    # torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1))
    pretrained_state = OrderedDict()
    for k, v in tmp_pretrained_state.items():
        # Delete "model." from key names since loading the checkpoint
        # automatically attaches it.
        layer_name = k.replace("model.", "")
        pretrained_state[layer_name] = v
        # print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))

    # Create model with same parameters as used in training.
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    if params['cuda']:
        model = model.cuda()

    # Load pretrained model.
    model.load_state_dict(pretrained_state)

    def upload(file_path):
        # Push a generated file to the bucket; no-op when running locally.
        if bucket is None:
            return
        # remove prefix /app so the blob name is relative to the workdir
        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        logging.info('uploading {}'.format(name))
        blob.upload_from_filename(file_path)

    # quantizer() returns (quantize, dequantize); only dequantize is needed
    # to turn generated q-level indices back into audio.
    (_, dequantize) = quantizer(params['q_method'])
    gen = Gen(Runner(model), params['cuda'])
    gen.register_plugin(
        GeneratorPlugin(results_path, params['n_samples'],
                        params['sample_length'], params['sample_rate'],
                        params['q_levels'], dequantize,
                        params['sampling_temperature'], upload))
    gen.run()
q_levels=params['q_levels'], weight_norm=params['weight_norm'] ) #model = model.cuda() # Delete "model." from key names since loading the checkpoint automatically attaches it to the key names pretrained_state = torch.load(PRETRAINED_PATH) new_pretrained_state = OrderedDict() for k, v in pretrained_state.items(): layer_name = k.replace("model.", "") new_pretrained_state[layer_name] = v # print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v))) # Load pretrained model model.load_state_dict(new_pretrained_state) # Generate Plugin num_samples = 1 # params['n_samples'] sample_length = params['sample_length'] sample_rate = params['sample_rate'] sampling_temperature = params['sampling_temperature'] # Override from our options sample_length = sample_rate * int(options.length) print("Number samples: {}, sample_length: {}, sample_rate: {}".format(num_samples, sample_length, sample_rate)) print("Generating %d seconds of audio" % (sample_length / sample_rate)) generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length, sample_rate, sampling_temperature) # Call new register function to accept the trained model and the cuda setting