Example #1
def initialize(
    mode: str,
    config_path: str,
    resume: str = "",
    multi_gpu: bool = False,
    gpu_id: int = -1,
) -> Tuple[Dict[str, Any], str, torch.device]:
    """Intialize."""
    # setup device
    device = torch.device("cpu")
    if torch.cuda.is_available():
        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
            f"{i}" for i in range(torch.cuda.device_count()))
        if multi_gpu:
            device = torch.device("cuda")
        elif 0 <= gpu_id < torch.cuda.device_count():
            device = torch.device(f"cuda:{gpu_id}")

    # create directory
    dirs_in_save = ["", "data", "checkpoint"]
    if not resume:
        dirs_in_save.append(os.path.join("checkpoint", curr_time))
    for name in dirs_in_save:
        path = os.path.join("save", name)
        if not os.path.exists(path):
            os.mkdir(path)

    # resume or load existing configurations
    if resume:
        dir_prefix = resume
        assert os.path.exists(dir_prefix), f"{dir_prefix} does not exist"
        config_path = glob.glob(os.path.join(dir_prefix, "*.py"))[0]
        config_name = os.path.basename(config_path)
    else:
        assert os.path.exists(config_path), "--config required"
        dir_prefix = os.path.join(checkpt_path, curr_time)
        config_name = os.path.basename(config_path)
        shutil.copyfile(config_path, os.path.join(dir_prefix, config_name))
    config = run_path(config_path)["config"]

    # set logger
    config_name = os.path.splitext(config_name)[0]
    utils.set_logger(filename=os.path.join(dir_prefix, f"{config_name}.log"))

    # config validation check
    if mode == "train":
        TrainConfigValidator(config).check()
    elif mode == "prune":
        PruneConfigValidator(config).check()
    elif mode == "quantize":
        QuantizeConfigValidator(config).check()
    elif mode == "shrink":
        ShrinkConfigValidator(config).check()
    else:
        raise NotImplementedError(f"unsupported mode: {mode}")

    # set random seed
    utils.set_random_seed(config["SEED"])

    return config, dir_prefix, device
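
The snippet above only defines initialize; a minimal, hypothetical sketch of how it might be wired into an entry point follows. The argparse flag names and defaults here are assumptions, not part of the original code.

# Hypothetical entry point; flag names and defaults are assumptions.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", default="train",
                        choices=["train", "prune", "quantize", "shrink"])
    parser.add_argument("--config", default="", help="path to a config .py file")
    parser.add_argument("--resume", default="", help="directory of a run to resume")
    parser.add_argument("--multi-gpu", action="store_true")
    parser.add_argument("--gpu", type=int, default=-1)
    args = parser.parse_args()

    config, dir_prefix, device = initialize(
        mode=args.mode,
        config_path=args.config,
        resume=args.resume,
        multi_gpu=args.multi_gpu,
        gpu_id=args.gpu,
    )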
Example #2
def test_get_batch_prepro(tfrecord_path, params):
    """Smoke-test the batch preprocessing: compare the raw mel mean with the masked mean."""
    logger = set_logger("./test.log")
    batch, init_op, nb_train, nb_val = get_batch_prepro(tfrecord_path, params, logger)

    mel, mel_mask = batch['mels'], batch['mels_mask']
    # mean over all frames vs. mean over valid (non-padded) frames only
    s1 = tf.reduce_mean(mel)
    s2 = tf.reduce_sum(mel * mel_mask) / tf.reduce_sum(mel_mask)

    with tf.Session() as sess:
        sess.run(init_op)
        batch_dict = sess.run(batch)  # pull one batch to check the pipeline runs end to end
        print("Mean: {}, with masking: {}".format(*sess.run([s1, s2])))
Example #3
seed = 7
np.random.seed(seed)
tf.keras.backend.clear_session()

# Mixed precision
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
print('Compute dtype: %s' % policy.compute_dtype)
print('Variable dtype: %s' % policy.variable_dtype)

# Path to save train results
path_save_train_results = Path("train_results")
path_save_train_results.mkdir(parents=True, exist_ok=True)

# Create logger
logger = set_logger(path_log="train_results/logs_training.log")

name_experiment = "Name-Experiment"  # becomes the folder name inside '/logs'
# None -> alphabetic order of classes from the values of "data/encode_labels.json"
order_output_model = ["Perro", "Gato"]

# Load Data
paths_train = fromJSON("data/list_train.json")
paths_val = fromJSON("data/list_val.json")
labels = fromJSON("data/labels.json")
encode_labels = fromJSON("data/encode_labels.json")

# Info about Image
img_format = "rgb"
Example #4
def main(argv):
    """
    Main driver/runner of 3D U-Net model.
    """
    
    # -------------------------------------------------------------------------
    # setup
    # -------------------------------------------------------------------------

    # set the random seed for the whole graph for reproducible experiments
    tf.set_random_seed(42)

    # load the parameters from model's json file as a dict
    args = arg_parser(argv)
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = Params(json_path).dict
    
    # check mode
    modes = ['train', 'train_eval', 'eval', 'predict']
    assert args.mode in modes, "mode has to be one of %s" % ','.join(modes) 
    
    # create logger, add loss and IOU to logging
    logger = set_logger(os.path.join(args.model_dir, 'train.log'))
    
    # -------------------------------------------------------------------------
    # create model
    # -------------------------------------------------------------------------
    
    model = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=args.model_dir,
        params=params,
        config=tf.estimator.RunConfig(
            log_step_count_steps=params['display_steps']
        )
    )
    
    # -------------------------------------------------------------------------
    # train
    # -------------------------------------------------------------------------
    
    if args.mode in ['train_eval', 'train']:
        model.train(
            input_fn=lambda: input_fn(True, params),
            max_steps=params['max_train_steps']
        )
    
    # -------------------------------------------------------------------------
    # evaluate
    # -------------------------------------------------------------------------
    
    if args.mode in ['train_eval', 'eval']:
        model.evaluate(input_fn=lambda: input_fn(False, params))
    
    # -------------------------------------------------------------------------
    # predict
    # -------------------------------------------------------------------------
    
    if args.mode == 'predict':
        predictions = model.predict(input_fn=lambda: input_fn(False, params))

        # extract predictions, only save predicted classes not probs
        to_save = dict()
        for i, y_pred in enumerate(predictions):
            if i in args.pred_ix:
                logger.info('Predicting patient: %d.' % i)
                to_save[i] = y_pred
        
        # save them with pickle to the model dir (a pickle file, despite the .npy extension)
        pred_file = os.path.join(args.model_dir, 'preds.npy')
        with open(pred_file, "wb") as f:
            pickle.dump(to_save, f)
        logger.info('Predictions saved to: %s.' % pred_file)
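
Since the predictions are written with pickle (despite the .npy file name), loading them back is symmetric; a small sketch, not part of the original.

# Read the pickled predictions back (it is a pickle file despite the .npy extension).
with open(os.path.join(args.model_dir, 'preds.npy'), "rb") as f:
    preds = pickle.load(f)
for patient_id, y_pred in preds.items():
    print(patient_id, getattr(y_pred, "shape", type(y_pred)))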
Example #5
weight_init = configs["weight"]
weight_g61 = configs.get("weight_g61", 0.0)

# data parameters ****************************
lshc = configs["limbc"]
group = configs["group"]

device_ids = configs["device"]  # multi-GPU
torch.cuda.set_device(device_ids[0])
cuda = torch.cuda.is_available()

logger = set_logger(os.path.join(model_name, 'train.log'))
logger.info(
    "************** Experiment Name: {} **************".format(model_name))

# ******************** build model ********************
logger.info("Create Model ...")

model = CPMHandLimb(outc=21, lshc=lshc, pretrained=True)
if cuda:
    model = model.cuda(device_ids[0])
    model = nn.DataParallel(model, device_ids=device_ids)

# ******************** data preparation  ********************
my_dataset = getattr(dataset, configs["dataset"])
train_data = my_dataset(data_root=data_root, mode='train', group=group)
valid_data = my_dataset(data_root=data_root, mode='valid', group=group)
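
The excerpt ends at dataset construction; a hedged sketch of wrapping the two datasets in DataLoaders follows. The batch size, worker count, and the "batch_size" config key are assumptions.

# Hypothetical DataLoader setup; batch size, worker count, and the config key are assumptions.
from torch.utils.data import DataLoader

train_loader = DataLoader(train_data, batch_size=configs.get("batch_size", 32),
                          shuffle=True, num_workers=4, pin_memory=cuda)
valid_loader = DataLoader(valid_data, batch_size=configs.get("batch_size", 32),
                          shuffle=False, num_workers=4, pin_memory=cuda)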
Example #6
args.cuda = args.cuda and torch.cuda.is_available()

out_dir = args.out_dir_path.strip('\r\n')
model_save = os.path.join(out_dir, 'models/modelbgrepproper.pt')

U.mkdir_p(out_dir + '/preds')
U.mkdir_p(out_dir + '/models/')
U.mkdir_p(out_dir + '/logs/')

configure(os.path.join(out_dir, 'logs/' + args.nm), flush_secs=5)

U.set_logger(out_dir)
U.print_args(args)

DEFAULT_COMPRESSED_DATASET = 'datasets-pickled.pkl'


np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)

if args.compressed_datasets == '':
    # train
    train_dataset = ASAPDataset(
        args.train_path,
        maxlen=args.maxlen,
        vocab_size=args.vocab_size,
        vocab_file=out_dir + '/vocab.pkl',
        pos=args.pos,
        read_vocab=(args.vocab_path is not None),
    )
    vocab = train_dataset.vocab
    train_dataset.make_scores_model_friendly()
    # test