Example #1
def main():
    # torch.multiprocessing.freeze_support()
    use_cuda = torch.cuda.is_available()

    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1)
    args = parser.parse_args()

    # load configuration from yaml file
    config = HParams.load(
        os.path.join(os.path.dirname(os.path.abspath(__file__)),
                     "hparams.yaml"))
    data_config = config.data_io
    model_config = config.model
    exp_config = config.experiment

    # check asset dir and get logger
    root_dir = "/" if use_cuda else get_project_root("Deep-Generative-Model")
    asset_path = os.path.join(root_dir, "assets",
                              "test")  # change subdirectory
    check_asset_dir(asset_path, config)
    logger.logging_verbosity(1)
    logger.add_filehandler(os.path.join(asset_path, "log.txt"))
    tf_logger = get_tflogger(asset_path)
    # data_config['root_path'] = os.path.join(root_dir, data_config['root_path'])

    # seed
    if args.seed > 0:
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)
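    # Seeding torch, CUDA, NumPy and Python's random in one place means model init,
    # shuffling and any augmentation all start from the same state for a given --seed.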

    logger.info("configuration complete")

    # get loader
    train_loader = get_loader(train=True, **data_config)
    test_loader = get_loader(train=False, **data_config)

    for batch in train_loader:
        print(batch['img'].size())
        break
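# Not part of the original excerpt: a minimal entry-point sketch, assuming main()
# above is the complete script body. Example #6 below uses the same pattern: the
# __main__ guard is what spawn-based DataLoader workers require, while
# freeze_support() only matters for frozen (e.g. packaged Windows) executables.
if __name__ == '__main__':
    torch.multiprocessing.freeze_support()
    main()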
Example #2
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

parser = argparse.ArgumentParser()
parser.add_argument('--index', type=int, help='Experiment Number', default=0)
parser.add_argument('--kfold', type=int, help='5 fold (0,1,2,3,4)', default=0)
parser.add_argument('--voca', type=bool, help='large voca is True', default=False)
parser.add_argument('--model', type=str, default='crf')
parser.add_argument('--pre_model', type=str, help='btc, cnn, crnn', default='e')
parser.add_argument('--dataset1', type=str, help='Dataset', default='isophonic_221')
parser.add_argument('--dataset2', type=str, help='Dataset', default='uspop_185')
parser.add_argument('--dataset3', type=str, help='Dataset', default='robbiewilliams')
parser.add_argument('--restore_epoch', type=int, default=1000)
parser.add_argument('--early_stop', type=bool, help='no improvement during 10 epoch -> stop', default=True)
args = parser.parse_args()

config = HParams.load("run_config.yaml")
if args.voca:
    config.feature['large_voca'] = True
    config.model['num_chords'] = 170
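    # Assumption, not stated in the snippet: --voca presumably switches from a
    # smaller default chord vocabulary to a large one with 170 classes, hence the
    # num_chords override.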

config.model['probs_out'] = True

# Result save path
asset_path = config.path['asset_path']
ckpt_path = config.path['ckpt_path']
result_path = config.path['result_path']
restore_epoch = args.restore_epoch
experiment_num = str(args.index)
ckpt_file_name = 'idx_'+experiment_num+'_%03d.pth.tar'
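# Illustration only (not in the original): the '%03d' placeholder is presumably
# filled with an epoch number when a checkpoint is saved or restored, e.g.
#   os.path.join(ckpt_path, ckpt_file_name % restore_epoch)
# which yields '.../idx_3_1000.pth.tar' for --index 3 and the default --restore_epoch 1000.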
tf_logger = TF_Logger(os.path.join(asset_path, 'tensorboard', 'idx_'+experiment_num))
logger.info("==== Experiment Number : %d " % args.index)
Example #3
parser.add_argument('--voca', type=bool, help='large voca is True', default=True)
parser.add_argument('--model', type=str, help='btc, cnn, crnn', default='btc')
#-----
parser.add_argument('--dataset1', type=str, help='Dataset', default='ce200')
parser.add_argument('--dataset2', type=str, help='Dataset', default='NA')
parser.add_argument('--dataset3', type=str, help='Dataset', default='NA')
#-----
#parser.add_argument('--dataset1', type=str, help='Dataset', default='isophonic')
#parser.add_argument('--dataset2', type=str, help='Dataset', default='uspop')
#parser.add_argument('--dataset3', type=str, help='Dataset', default='robbiewilliams')
parser.add_argument('--restore_epoch', type=int, default=1000)
parser.add_argument('--early_stop', type=bool, help='no improvement during 10 epoch -> stop', default=True)
args = parser.parse_args()

experiment_num = str(args.index)
config = HParams.load("config/run_config_idx"+experiment_num+".yaml")

if args.voca:
    config.feature['large_voca'] = True
    config.model['num_chords'] = 170

# Result save path
asset_path = config.path['asset_path']
ckpt_path = config.path['ckpt_path']
result_path = config.path['result_path']
restore_epoch = args.restore_epoch
#experiment_num = str(args.index)
ckpt_file_name = 'idx_'+experiment_num+'_%03d.pt'
tf_logger = TF_Logger(os.path.join(asset_path, 'tensorboard', 'idx_'+experiment_num))
logger.info("==== Experiment Number : %d " % args.index)
Example #4
parser.add_argument('--voca', type=bool, help='large voca is True', default=True)
parser.add_argument('--model', type=str, help='btc, cnn, crnn', default='btc')
#-----
parser.add_argument('--dataset1', type=str, help='Dataset', default='ce200')
#-----
#parser.add_argument('--dataset1', type=str, help='Dataset', default='isophonic')
#parser.add_argument('--dataset2', type=str, help='Dataset', default='uspop')
#parser.add_argument('--dataset3', type=str, help='Dataset', default='robbiewilliams')
parser.add_argument('--restore_epoch', type=int, default=1)
parser.add_argument('--early_stop',
                    type=bool,
                    help='no improvement during 10 epoch -> stop',
                    default=True)
args = parser.parse_args()

config = HParams.load("config/run_config_idx0.yaml")
if args.voca:
    config.feature['large_voca'] = True
    config.model['num_chords'] = 170

# Result save path
asset_path = config.path['asset_path']
ckpt_path = config.path['ckpt_path']
result_path = config.path['result_path']
restore_epoch = args.restore_epoch
experiment_num = str(args.index)
ckpt_file_name = 'idx_' + experiment_num + '_%03d.pt'
tf_logger = TF_Logger(
    os.path.join(asset_path, 'tensorboard', 'idx_' + experiment_num))
logger.info("==== Experiment Number : %d " % args.index)
Example #5
parser.add_argument('--gpu_index', type=int, default=0, help='GPU index')
parser.add_argument('--ngpu', type=int, default=4, help='0 = CPU.')
parser.add_argument('--optim_name', type=str, default='adam')
parser.add_argument('--restore_epoch', type=int, default=-1)
parser.add_argument('--load_rhythm', dest='load_rhythm', action='store_true')
parser.add_argument('--seed', type=int, default=1)
args = parser.parse_args()

use_cuda = torch.cuda.is_available()
device = torch.device("cuda:%d" % args.gpu_index if use_cuda else "cpu")
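# "cuda:%d" selects the specific GPU given by --gpu_index; when CUDA is not
# available the script falls back to the CPU (the --ngpu help text notes 0 = CPU).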

hparam_file = os.path.join(os.getcwd(), "hparams.yaml")

config = HParams.load(hparam_file)
data_config = config.data_io
model_config = config.model
exp_config = config.experiment

# configuration
asset_root = config.asset_root
asset_path = os.path.join(asset_root, 'idx%03d' % args.idx)
make_save_dir(asset_path, config)
logger.logging_verbosity(1)
logger.add_filehandler(os.path.join(asset_path, "log.txt"))

# seed
if args.seed > 0:
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
Example #6
import argparse
import torch
import numpy as np
import random
import os

if __name__ == '__main__':
    torch.multiprocessing.freeze_support()
    use_cuda = torch.cuda.is_available()

    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1)
    args = parser.parse_args()

    # load configuration from yaml file
    config = HParams.load(
        os.path.join(os.path.dirname(os.path.abspath(__file__)),
                     "hparams.yaml"))
    data_config = config.data_io
    model_config = config.model
    exp_config = config.experiment

    # check asset dir and get logger
    root_dir = "/" if use_cuda else get_project_root("Deep-Generative-Model")
    asset_path = os.path.join(root_dir, "assets",
                              "test")  # change subdirectory
    check_asset_dir(asset_path, config)
    logger.logging_verbosity(1)
    logger.add_filehandler(os.path.join(asset_path, "log.txt"))
    tf_logger = get_tflogger(asset_path)
    data_config['root_path'] = os.path.join(root_dir, data_config['root_path'])
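    # Not in the original excerpt: with root_path resolved, the script presumably
    # continues by building the loaders as in Example #1, e.g.
    #   train_loader = get_loader(train=True, **data_config)
    #   test_loader = get_loader(train=False, **data_config)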