Example #1
    def test_train(self):

        output_dir = mkdtemp()

        # command line as in README.txt
        args = [
            "--model_desc=config.json", "--output_dir=" + output_dir,
            "--num_train_epochs=1", "--size_to_read=1",
            "--per_gpu_train_batch_size=1", "--total_train_batch_size=8",
            "--dns_datasets=../../../data/noise_suppression/datasets",
            "--logacc=1"
        ]
        main_train(args)

        model_onnx = os.path.join(output_dir, "model.onnx")
        self.assertTrue(os.path.exists(model_onnx),
                        model_onnx + " was not created by the train script")

        # run the converter to IR as in README.txt
        INTEL_OPENVINO_DIR = os.getenv("INTEL_OPENVINO_DIR")
        export_command = "python {}/deployment_tools/model_optimizer/mo.py".format(
            INTEL_OPENVINO_DIR)
        export_command += " --input_model " + model_onnx
        export_command += " --output_dir " + output_dir
        res = subprocess.run(export_command, shell=True, check=False)
        self.assertEqual(0, res.returncode, "failed to run " + export_command)

        model_xml = os.path.join(output_dir, "model.xml")
        self.assertTrue(os.path.exists(model_xml),
                        model_xml + " was not created by Model Optimizer")

        model_bin = os.path.join(output_dir, "model.bin")
        self.assertTrue(os.path.exists(model_bin),
                        model_bin + " was not created by Model Optimizer")
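
A variant of the conversion step above, sketched with the command passed as an argument list so no `shell=True` string parsing is involved; paths and flags mirror the test, and INTEL_OPENVINO_DIR is assumed to be set in the environment:

import os
import subprocess
from tempfile import mkdtemp

output_dir = mkdtemp()                               # as in the test above
model_onnx = os.path.join(output_dir, "model.onnx")  # produced by main_train

# Build the Model Optimizer command as a list; subprocess runs it directly,
# so the paths need no shell quoting.
mo_script = os.path.join(os.environ["INTEL_OPENVINO_DIR"],
                         "deployment_tools", "model_optimizer", "mo.py")
res = subprocess.run(["python", mo_script,
                      "--input_model", model_onnx,
                      "--output_dir", output_dir],
                     check=False)
print("Model Optimizer exit code:", res.returncode)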
Example #2
def main():

    parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Testing')
    parser = parse_args(parser)
    args, unknown_args = parser.parse_known_args()

    if "train" in args.bench_class:
        main_train()
    else:
        main_infer()
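
`parse_known_args` returns the recognized namespace plus the leftover tokens, so the wrapper above tolerates flags meant for a later stage instead of erroring out. A minimal sketch of that behavior, with hypothetical flag names:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--bench-class', default='train')
# '--unknown-flag' is not declared, so it lands in unknown_args instead of failing
args, unknown_args = parser.parse_known_args(['--bench-class=train', '--unknown-flag=1'])
print(args.bench_class)  # 'train'
print(unknown_args)      # ['--unknown-flag=1']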
Example #3
from onmt.opts_preprocess import OPT_PREPROCESS 
from onmt.opts_train import OPT_TRAIN 
from onmt.opts_translate import OPT_TRANSLATE 

from preprocess import main as main_preprocess
from train import main as main_train
from translate import main as main_translate

opt_preprocess = OPT_PREPROCESS(reverse=True)
opt_train = OPT_TRAIN(reverse=True)
opt_translate = OPT_TRANSLATE(reverse=True)


if __name__ == '__main__':
    main_preprocess(opt_preprocess)
    main_train(opt_train)
    main_translate(opt_translate)
Example #4
def train_model():
    mode = request.args.get('dev')
    score = train.main_train(mode=mode)
    return jsonify({'score': score}), 200, {"mimetype": "application/json"}
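
The excerpt omits the surrounding Flask app; a self-contained sketch, assuming a hypothetical /train route and a train module exposing main_train(mode=...):

from flask import Flask, request, jsonify

import train  # assumed module exposing main_train(mode=...)

app = Flask(__name__)

@app.route('/train')  # hypothetical route; the original decorator is not shown
def train_model():
    # the '?dev=...' query parameter selects the training mode (None if absent)
    mode = request.args.get('dev')
    score = train.main_train(mode=mode)
    # jsonify already sets the Content-Type header, so the trailing mapping is
    # redundant; it is kept here to match the excerpt above
    return jsonify({'score': score}), 200, {"mimetype": "application/json"}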
Example #5
    logging_group.add_argument('--log_interval', type=int, default=1000, help='log at this interval (defaults to 1000)')
    logging_group.add_argument('--validation', default=False, action='store_true', help='run validation on the validation dataset')
    logging_group.add_argument('--val_log_interval', type=int, default=1000, help="log the validation output at the given interval")
    logging_group.add_argument('--real_left_v', help='The location of the folder containing the left real images.')
    logging_group.add_argument('--real_right_v', help='The location of the folder containing the right real images.')
    logging_group.add_argument('--disp_left_v', help='The location of the folder containing the left disparity map')
    logging_group.add_argument('--disp_right_v', help='The location of the folder containing the right disparity map')
    logging_group.add_argument('--batch_size_v', type=int, default=50, help='batch size of the validation data')

    # TPU related Arguments
    tpu_group = parser.add_argument_group("TPU","Arguments for TPU training")
    tpu_group.add_argument("--num_cores", type = int, default=8, help="Defines the number of TPU cores to use")
    tpu_group.add_argument("--loader_prefetch_size", type=int, default=8, help='Defines the loader prefetch queue size')
    tpu_group.add_argument("--device_prefetch_size", type=int, default=4, help='Defines the device prefetch size')

    # Other
    parser.add_argument('--cuda', default=False, action='store_true', help='use cuda')
    parser.add_argument('--tpu', default=False, action='store_true', help='use tpu')
    parser.add_argument('--c1', type=float, default=1, help='smooth loss')
    parser.add_argument('--c2', type=float, default=1, help='recon loss')
    parser.add_argument('--c3', type=float, default=1, help='dipsmi loss')
    parser.add_argument('--c4', type=float, default=1, help='edge loss')
    # Parse arguments
    args = parser.parse_args()
    if args.tpu:
        print("tpu enabled")
        import torch_xla.distributed.xla_multiprocessing as xmp
        xmp.spawn(train.main_train, args=(args,), nprocs=args.num_cores)  # optionally: start_method='fork'
    else:
        train.main_train(0, args)
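
`xmp.spawn` invokes its target as fn(index, *args), handing each process its ordinal as the first argument, which is why the single-device fallback passes an explicit index of 0. A minimal sketch of a spawn-compatible entry point, with a hypothetical body:

def main_train(index, args):
    # index: process ordinal supplied by xmp.spawn (0..num_cores-1);
    #        the non-TPU branch above passes 0 by hand
    # args:  the parsed namespace forwarded via args=(args,)
    device = 'tpu' if args.tpu else ('cuda' if args.cuda else 'cpu')
    print("process {} training on {}".format(index, device))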
Example #6
                    type=str,
                    help='Relative path to ground truth masks')

# Training parameters
parser.add_argument('--n_epochs',
                    default=101,
                    type=int,
                    help='Number of epochs for training')
config = parser.parse_args()

for o in config.out_res:
    for d in config.scalings:
        for p in config.degree:
            c = o // 2**d

            config.model_path = f"../example_output/saved_models/weights_chd_ct_table1-O{o}-d{d}-p{p}.pth"
            ARGS_TRAIN = [
                "--base_data_dir", config.base_data_dir, "--sub_data_dir",
                config.sub_data_dir, "--base_output_dir",
                config.base_output_dir, "--image_dir", config.image_dir,
                "--mask_dir", config.mask_dir, "--degree",
                str(p), "--scalings",
                str(d), "--code_size",
                str(c), "--model_in", config.model_path, "--model_out",
                config.model_path, "--network", config.network, "--n_epochs",
                str(config.n_epochs)
            ]

            config_args_train = parse_arguments_train(args=ARGS_TRAIN)
            main_train(config_args_train)
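
In the grid above, the code size halves once per scaling step, c = o // 2**d; for example, an output resolution o = 128 with d = 3 scalings gives a code size of 16.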
Example #7
def main(args):
    help_str = "Do `python3 src [local, gcloud, floyd, devbox] [predict, train, confusion]`"

    params = {}
    if args[1] == 'floyd':
        print("FLOYD ENV")
        params = {
            'data_path': '/data',
            'output_path': '/output',
            'audio_path': '/data/*/*wav',
            'validation_list_path': '/data/validation_list.txt',
            'tensorboard_root': '/output',
            'sample': False,
            'sample_size': 1000,
            'epochs': 20,
            'batch_size': 64,
            'submission_path': './output/submission',
            'test_path': '???',
            'batch_size_pred': 64
        }
    elif args[1] == 'gcloud':
        print("GCLOUD ENV")
        params = {
            'data_path': '/mnt/data/speech/',
            'output_path': './output',
            'audio_path': '/mnt/data/speech/train/audio/*/*wav',
            'validation_list_path':
            '/mnt/data/speech/train/validation_list.txt',
            'tensorboard_root': './output',
            'sample': False,
            'sample_size': 2000,
            'epochs': 60,
            'batch_size': 64,
            'submission_path': './submissions',
            'test_path': '/mnt/data/speech/test/audio/*wav',
            'batch_size_pred': 64
        }
    elif args[1] == 'devbox':
        print("DEVBOX ENV")
        params = {
            'data_path': '/home/ilya/Data/speech/',
            'output_path': '/home/ilya/Data/speech/out/output',
            'audio_path': '/home/ilya/Data/speech/train/audio/*/*wav',
            'validation_list_path':
            '/home/ilya/Data/speech/train/validation_list.txt',
            'tensorboard_root': '/home/ilya/Data/speech/out/output',
            'sample': False,
            'sample_size': 2000,
            'epochs': 120,
            'batch_size': 128,
            'submission_path': '/home/ilya/Data/speech/out/submissions',
            'test_path': '/home/ilya/Data/speech/test/audio/*wav',
            'batch_size_pred': 64
        }
    elif args[1] == 'local':
        print("DEV ENV")
        params = {
            'data_path': './data',
            'output_path': './output',
            'audio_path': './data/train/audio/*/*wav',
            'validation_list_path': './data/train/validation_list.txt',
            'tensorboard_root': '/tmp/tensorflow/',
            'sample': True,
            'sample_size': 40,
            'epochs': 10,
            'batch_size': 8,
            'submission_path': './submissions',
            'test_path': './data/test/audio/*wav',
            'batch_size_pred': 1
        }
    else:
        print(help_str)
        exit(-1)

    if len(args) == 4 and args[2] == 'predict':
        params['model_path'] = args[3]
        main_predict(params)
    elif len(args) == 3 and args[2] == 'train':
        main_train(params, Classifier1D)
    elif len(args) == 3 and args[2] == 'train_deep_1d':
        main_train(params, Deep1DClassifier)
    elif len(args) == 3 and args[2] == 'train_resnet_1d':
        main_train(params, Deep1DResnetClassifier)
    elif len(args) == 4 and args[2] == 'confusion':
        params['model_path'] = args[3]
        main_confusion_matrix(params)
    else:
        print(help_str)
        exit(-1)
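
The three train commands differ only in the classifier class they forward, so a dispatch table keeps the command-to-class mapping in one place. A sketch of that refactor, assuming the same classifier classes as above:

# Hypothetical refactor: map each train command to its classifier class.
TRAINERS = {
    'train': Classifier1D,
    'train_deep_1d': Deep1DClassifier,
    'train_resnet_1d': Deep1DResnetClassifier,
}

if len(args) == 3 and args[2] in TRAINERS:
    main_train(params, TRAINERS[args[2]])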
Example #8
    ###################################################################

    # backup dataset
    check_N_mkdir(os.path.join(hyperparams['folder_name'], 'copy'))
    shutil.copytree(hyperparams['train_dir'],
                    os.path.join(hyperparams['folder_name'], 'copy', 'train'))
    shutil.copytree(hyperparams['val_dir'],
                    os.path.join(hyperparams['folder_name'], 'copy', 'val'))
    shutil.copytree(hyperparams['test_dir'],
                    os.path.join(hyperparams['folder_name'], 'copy', 'test'))

    try:
        hyperparams['max_nb_cls'] = get_max_nb_cls(hyperparams['train_dir'])[1]
        start_time = datetime.datetime.now()
        main_train(hyperparams,
                   grad_view=True,
                   nb_classes=hyperparams['max_nb_cls'])
        train_time = (datetime.datetime.now() - start_time).total_seconds() / 3600  # hours
    except Exception as e:
        logger.error('Errors during training')
        logger.error(e)

    try:
        # save lr_curves
        check_N_mkdir(os.path.join(hyperparams['folder_name'], 'curves'))
        ac_tn, _, ls_tn, _ = lr_curve_extractor(
            os.path.join(hyperparams['folder_name'], 'train'))
        _, ac_val, _, ls_val = lr_curve_extractor(
            os.path.join(hyperparams['folder_name'], 'test'))
        best_step = ac_val.step.loc[ac_val.value.argmax()]
        # best_step=0
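
In recent pandas, Series.argmax returns a positional index, so pairing it with label-based .loc is only safe while the index is a default RangeIndex. Assuming lr_curve_extractor returns DataFrames with step and value columns, idxmax makes the label-based lookup explicit:

# Hypothetical equivalent of the best-step lookup above:
best_step = ac_val.step.loc[ac_val.value.idxmax()]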
Example #9
import json
import os
import sys

import train

CONST_HOMEDIR = os.environ['HOME']

CONST_QSUB_FILEPATH = "{}/qsub".format(CONST_HOMEDIR)
JOB_ID = int(sys.argv[1])

# Set the working directory to the qsub folder
os.chdir(CONST_QSUB_FILEPATH)

# Load the JSON file associated with this job ID
with open("{}.json".format(JOB_ID), 'r') as f_json:
  training_args_dict = json.load(f_json)

# Resolve the $HOME directory in any string-valued argument
for key, value in training_args_dict.items():
  if isinstance(value, str):
    training_args_dict[key] = value.format(CONST_HOMEDIR)

# Log the resolved training parameters
print("Running NCR training with params:")
for key, value in training_args_dict.items():
  print("\t{} --> {}".format(key, value))

# Start the training
training_args = train.MainTrainArgClass(**training_args_dict)
train.main_train(training_args, JOB_ID)
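
The runner expects a <JOB_ID>.json file under $HOME/qsub whose string values may carry a '{}' placeholder that the loop above resolves to the home directory. A hypothetical sketch of producing such a job file; the keys are illustrative, since the real fields depend on train.MainTrainArgClass:

import json
import os

job_id = 42  # hypothetical job ID
job_args = {
    "data_dir": "{}/datasets/ncr",  # '{}' resolves to $HOME at load time
    "output_dir": "{}/runs/ncr",
    "epochs": 20,
}
with open(os.path.join(os.environ['HOME'], 'qsub', '{}.json'.format(job_id)), 'w') as f:
    json.dump(job_args, f, indent=2)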