Example No. 1
def evaluation(tweets,
               labels=None,
               file_path=None,
               model=model,
               tokenizer=tokenizer,
               max_len=512,
               batch_size=1):
    since = time.time()
    dataloader_test = create_data_loader(tokenizer,
                                         tweets,
                                         labels,
                                         max_len=max_len,
                                         batch_size=batch_size)
    logging.info("Prediction data prepared")
    # load either the original model or a fine-tuned one
    if file_path:
        try:
            load_model(file_path + '/model.pt', model)
        except FileNotFoundError:
            print('No fine-tuned model found. Using the original model')

    labels_pred, labels_true, time_eval = evaluate(model, dataloader_test)
    all_time = time.time() - since
    logging.info("Prediction finished")
    return {
        'labels_pred': labels_pred,
        'labels_true': labels_true,
        'time_eval': time_eval,
        'all_time': all_time
    }
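The `load_model` helper called above is not shown in the snippet; here is a minimal sketch of what it presumably does, assuming `model.pt` holds a plain state dict (the signature and file layout are assumptions):

import torch

def load_model(path, model, device='cpu'):
    # hypothetical loader: read a saved state dict and copy it into the model in place
    state_dict = torch.load(path, map_location=device)
    model.load_state_dict(state_dict)
    return model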
Example No. 2
def process(conf, wave_path, epoch):
    types2id, id2types = init_types_and_id('/local/data/zcs/sound_set/')

    with open(conf) as fp:
        conf = yaml.safe_load(fp)

    model = import_class(conf['model']['name'])(**conf['model']['args'])
    print('total model parameters: %.2f K' % (model.total_parameter() / 1024))
    model, checkpoint_path = sl.load_model(conf['checkpoint'], int(epoch),
                                           model)
    print(f'load from {checkpoint_path}')

    model.eval()

    pcm, sr = sf.read(wave_path)
    spec = librosa.stft(pcm,
                        n_fft=1024,
                        hop_length=512,
                        window='hann',
                        center=False)
    spec_mag = np.abs(spec[1:, :].T)  # frequency-domain magnitude only; phase information is discarded
    tensor = torch.from_numpy(spec_mag).unsqueeze(0)

    preds = model(tensor)
    # softmax over the class dimension; drop the batch axis so we can iterate over per-class probabilities
    p = torch.nn.Softmax(dim=-1)(preds).squeeze(0).detach().numpy()
    for i, v in enumerate(p):
        print(id2types[i], '\t%.2f' % v)
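Several of these examples build the model from a config string via `import_class`, which is defined elsewhere; a minimal sketch under the assumption that the name is a dotted path such as 'models.cnn.Classifier' (that path is hypothetical):

import importlib

def import_class(name):
    # hypothetical helper: split 'package.module.ClassName' and return the class object
    module_name, class_name = name.rsplit('.', 1)
    return getattr(importlib.import_module(module_name), class_name)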
Example No. 3
def main(conf):
    with open(conf) as fp:
        conf = yaml.safe_load(fp)
    # print(conf)

    vis = zv.ZcsVisdom(server=conf['visdom']['ip'],
                       port=conf['visdom']['port'])

    dev_dataloader = get_dev_dataloader(conf)

    model = import_class(conf['model']['name'])(**conf['model']['args'])
    os.environ["CUDA_VISIBLE_DEVICES"] = conf['train']['gpu_ids']
    n_gpus = torch.cuda.device_count()
    zp.B('use %d gpus [%s]' % (n_gpus, conf['train']['gpu_ids']))

    model = model.cuda()
    model = torch.nn.DataParallel(model, device_ids=list(range(n_gpus)))
    # model = sl.load_model(conf['checkpoint'], -1, model)

    loss_fn = import_class(conf['loss']['name'])(**conf['loss']['args'])
    loss_fn.cuda()

    for i in range(0, 40):
        model = sl.load_model(conf['checkpoint'], i, model)
        validation(model, loss_fn, dev_dataloader, vis, conf)
        break
Example No. 4
def generate():
    model = m.WaveNet(n_layers=10 * 3, hidden_channels=32)
    model = sl.load_model('./checkpoint', 10000, model)
    model = model.cuda()

    # samplerate, pcm = wavfile.read('./music/wav/jacob_heringman-blame_not_my_lute-03-the_bagpipes-0-29.wav')
    samplerate, pcm = wavfile.read(
        './music/wav/jacob_heringman-blame_not_my_lute-12-robin_hoode-0-29.wav'
    )
    pcm = pcm[600 * 16:600 * 16 + model.receptive_field]
    pcm = d.mu_law_encode(pcm)
    generated = torch.from_numpy(pcm).type(torch.long).cuda()

    for i in range(16000 * 3):
        # one-hot encode the most recent receptive_field samples as the network input
        one_hot = torch.FloatTensor(1, 256, model.receptive_field).zero_().cuda()
        one_hot.scatter_(
            1, generated[-model.receptive_field:].view(1, 1, model.receptive_field),
            1.0)
        # print(one_hot[0, :, -1].shape, one_hot[0, :, -1].sum(0))
        pred = model(one_hot)
        value = pred.argmax(dim=1)
        generated = torch.cat((generated, value), 0)
        print(i, value, generated.shape)
    generated = generated.detach().cpu().numpy()
    generated = (generated / 256) * 2 - 1
    mu_gen = mu_law_expansion(generated, 256)
    print(generated.shape, mu_gen.shape)
    # print(mu_gen[3000:3000+100])
    wavfile.write('./out.wav', 16000, (mu_gen * 30000).astype(np.int16))
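`d.mu_law_encode` and `mu_law_expansion` come from external modules; below is a sketch of the standard mu-law companding they presumably implement (the int16 normalization and quantization convention are assumptions):

import numpy as np

def mu_law_encode(audio, mu=256):
    # normalize int16 PCM to [-1, 1], compand with the mu-law curve, quantize to integers in [0, mu - 1]
    audio = np.clip(audio.astype(np.float64) / 32768.0, -1.0, 1.0)
    companded = np.sign(audio) * np.log1p(mu * np.abs(audio)) / np.log1p(mu)
    return np.minimum(np.floor((companded + 1) / 2 * mu), mu - 1).astype(np.int64)

def mu_law_expansion(data, mu=256):
    # inverse companding; expects values already rescaled to [-1, 1], as done above with (generated / 256) * 2 - 1
    return np.sign(data) * np.expm1(np.abs(data) * np.log1p(mu)) / mu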
Example No. 5
def trans(conf, epoch):
    with open(conf) as fp:
        conf = yaml.safe_load(fp)

    model = import_class(conf['model']['name'])(**conf['model']['args'])
    print('total model parameters: %.2f K' % (model.total_parameter() / 1024))

    model, checkpoint_path = sl.load_model(conf['checkpoint'], epoch, model)
    print(f'load from {checkpoint_path}')
    out_name = os.path.split(conf['checkpoint'])[-1] + '.pt'

    model.eval()
    example = torch.rand(1, 157, 512)
    traced_script_module = torch.jit.trace(model, example)
    traced_script_module.save(out_name)
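Once saved, the traced module can be reloaded without the original Python class definition; a usage sketch (out_name is the file written above, and the dummy input must match the traced shape (1, 157, 512)):

import torch

loaded = torch.jit.load(out_name)  # also loadable from C++ via torch::jit::load
with torch.no_grad():
    out = loaded(torch.rand(1, 157, 512))

Note that torch.jit.trace only records the operations executed for the given example input, so any data-dependent control flow in the model is not captured.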
Example No. 6
def main(conf):
    with open(conf) as fp:
        conf = yaml.safe_load(fp)

    logger = SummaryWriter(logdir=conf['checkpoint'])

    train_dataset = dataset.TrainDataset('/local/data/zcs/sound_set')
    train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                   batch_size=16,
                                                   shuffle=True,
                                                   num_workers=4)

    dev_dataset = dataset.DevDataset('/local/data/zcs/sound_set')
    dev_dataloader = torch.utils.data.DataLoader(dataset=dev_dataset,
                                                 batch_size=4,
                                                 shuffle=False,
                                                 num_workers=2)

    model = import_class(conf['model']['name'])(**conf['model']['args'])
    print('total model parameters: %.2f K' % (model.total_parameter() / 1024))
    os.environ["CUDA_VISIBLE_DEVICES"] = conf['gpu_ids']
    model = model.cuda()
    model, checkpoint_path = sl.load_model(conf['checkpoint'], -1, model)

    loss_fn = torch.nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters())
    optimizer, checkpoint_path = sl.load_optimizer(conf['checkpoint'], -1,
                                                   optimizer)

    scheduler = noam.LRScheduler(optimizer, warmup_steps=1000, init_lr=0.001)
    scheduler, checkpoint_path = sl.load_scheduler(conf['checkpoint'], -1,
                                                   scheduler)

    try:
        trained_epoch = sl.find_last_checkpoint(conf['checkpoint'])
        print('train from epoch %d' % (trained_epoch + 1))
    except Exception as e:
        print('train from the very beginning, {}'.format(e))
        trained_epoch = -1
    for epoch in range(trained_epoch + 1, 1000):
        train(model, loss_fn, optimizer, scheduler, train_dataloader, logger,
              epoch)
        sl.save_checkpoint(conf['checkpoint'], epoch, model, optimizer,
                           scheduler)
        evaluate(model, dev_dataloader, logger, epoch)
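`noam.LRScheduler` is an external warmup scheduler; assuming it follows the usual Noam schedule (linear warmup, then inverse-square-root decay, peaking at init_lr after warmup_steps), a per-step sketch might look like this:

class NoamLR:
    # hypothetical sketch, not the actual noam.LRScheduler implementation
    def __init__(self, optimizer, warmup_steps=1000, init_lr=0.001):
        self.optimizer = optimizer
        self.warmup_steps = warmup_steps
        self.init_lr = init_lr
        self.step_num = 0

    def step(self):
        self.step_num += 1
        scale = (self.warmup_steps ** 0.5) * min(self.step_num ** -0.5,
                                                 self.step_num * self.warmup_steps ** -1.5)
        for group in self.optimizer.param_groups:
            group['lr'] = self.init_lr * scale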
Example No. 7
def main(conf):
    with open(conf) as fp:
        conf = yaml.safe_load(fp)
    # print(conf)

    vis = zv.ZcsVisdom(server=conf['visdom']['ip'],
                       port=conf['visdom']['port'])

    train_dataloader = get_train_dataloader(conf)
    dev_dataloader = get_dev_dataloader(conf)

    model = import_class(conf['model']['name'])(**conf['model']['args'])
    os.environ["CUDA_VISIBLE_DEVICES"] = conf['train']['gpu_ids']
    n_gpus = torch.cuda.device_count()
    zp.B('use %d gpus [%s]' % (n_gpus, conf['train']['gpu_ids']))

    model = model.cuda()
    model = torch.nn.DataParallel(model, device_ids=list(range(n_gpus)))
    model = sl.load_model(conf['checkpoint'], -1, model)

    loss_fn = import_class(conf['loss']['name'])(**conf['loss']['args'])
    loss_fn.cuda()

    optimizer = import_class(conf['train']['optimizer']['name'])(
        model.parameters(), **conf['train']['optimizer']['args'])
    optimizer = sl.load_optimizer(conf['checkpoint'], -1, optimizer)

    zp.B('totally %d steps per epoch' % (len(train_dataloader)))
    try:
        trained_epoch = sl.find_last_checkpoint(conf['checkpoint'])
        zp.B('train from epoch %d' % (trained_epoch + 1))
    except Exception as e:
        zp.B('train from the very beginning, {}'.format(e))
        trained_epoch = -1
    for epoch in range(trained_epoch + 1, conf['train']['num_epochs']):
        validation(model, loss_fn, dev_dataloader, vis, conf)
        train(model, loss_fn, optimizer, train_dataloader, vis, epoch, conf)
        sl.save_checkpoint(conf['checkpoint'], epoch, model, optimizer)
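One caveat with this setup: the model is wrapped in DataParallel before sl.load_model is called, so the expected state-dict keys carry a "module." prefix, and a checkpoint saved from a bare (unwrapped) model will not match. A compatibility sketch, where checkpoint_file and the stored dict layout are assumptions about what sl saves:

state = torch.load(checkpoint_file, map_location='cpu')
# strip the DataParallel prefix if present so the keys match the bare model
state = {k[len('module.'):] if k.startswith('module.') else k: v for k, v in state.items()}
model.module.load_state_dict(state)  # model is the DataParallel wrapper here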
Example No. 8
def train(n_epoch):
    dataset = d.Dataset(3070, 160)
    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=4,
                                             shuffle=True)
    model = m.WaveNet(n_layers=10*3, hidden_channels=32)
    model = model.cuda()
    model.train()

    loss_func = torch.nn.CrossEntropyLoss()
    optim = torch.optim.Adam(params=model.parameters(), lr=0.001)

    try:
        trained_epoch = sl.find_last_checkpoint('./checkpoint')
        print('train from epoch %d' % (trained_epoch + 1))
    except Exception as e:
        print('train from the very beginning, {}'.format(e))
        trained_epoch = -1
    model = sl.load_model('./checkpoint', -1, model)
    optim = sl.load_optimizer('./checkpoint', -1, optim)
    for epoch in range(trained_epoch+1, n_epoch):
        for step, (batch_x, batch_y) in enumerate(dataloader):
            batch_x = batch_x.cuda()
            batch_y = batch_y.cuda()

            pred = model(batch_x)
            # print(pred.shape, batch_y.shape)
            loss = loss_func(pred, batch_y.view(pred.shape[0]))
            print(epoch, step, loss.detach().cpu().numpy())

            optim.zero_grad()
            loss.backward()
            optim.step()
        if epoch % 100 == 0:
            sl.save_checkpoint('./checkpoint', epoch, model, optim)
    sl.save_checkpoint('./checkpoint', epoch, model, optim)
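The fixed window of 3070 samples in d.Dataset(3070, 160) presumably equals the receptive field of this WaveNet: with kernel size 2 and dilations 1, 2, ..., 512 repeated 3 times (n_layers = 10 * 3), the receptive field is 1 + 3 * (2**10 - 1) = 3070. A quick check, assuming that dilation pattern:

# receptive field of stacked dilated convolutions with kernel size 2 is 1 + sum of the dilations
dilations = [2 ** i for _ in range(3) for i in range(10)]
receptive_field = 1 + sum(dilations)
print(receptive_field)  # 3070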
Example No. 9
MODEL_PATH = ""

TEST_MODE = False
LOAD_MODEL = False  # default so the flag always exists; overridden below in test mode
SPLIT_DATASET = True
SPLIT_RATIO = 0.7  # between 0 and 1
if TEST_MODE:  # test mode means no training and no train_dataset, so skip splitting and load a saved model
    SPLIT_DATASET = False
    LOAD_MODEL = True


BATCH_SIZE = 64
SHUFFLE = True
NUM_WORKERS = 0  # number of DataLoader worker processes (0 = load data in the main process)

if LOAD_MODEL:
    model = load_model()
else:
    model = Model().to(DEVICE)

EPOCH = 100
LEARNING_RATE = 0.0002
CRITERION = nn.NLLLoss()
OPTIMIZER = Adam(model.parameters(), lr=LEARNING_RATE, eps=1e-08, weight_decay=0)
#############################################################################################################################

# preprocessing, make or load and save dataset
############################################################################################################################# 
if LOAD_DATA:

    dataset = load_dataset(DATASET_PATH)
    if SPLIT_DATASET: