Example #1
def main():
    # Set the random seed for reproducible experiments
    torch.manual_seed(230)

    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', help="Directory containing the dataset")
    parser.add_argument('--model_dir',
                        help="Directory containing the model checkpoints")
    parser.add_argument('--params', help='Path to the params.json file')
    parser.add_argument('--restore_file',
                        default='best',
                        help="Name of the file in --model_dir "
                             "containing weights to load")

    args = parser.parse_args()
    params = utils.Params(args.params)

    # Get the logger
    utils.set_logger(os.path.join(params.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")

    test_dataset = dataset(file_path=params.metadata_file,
                           split="Test",
                           classes=params.classes)

    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=params.batch_size,
                             shuffle=False,  # no need to shuffle at test time
                             num_workers=8)

    logging.info("- done.")

    # Define the model and optimizer
    if params.model != "Inception":
        net = importlib.import_module("features.models.{}".format(
            params.model))
        model = net.Net()
        inception = False
    else:
        model = models.inception_v3(pretrained=False)
        model.fc = nn.Linear(2048, len(params.classes))
        model.AuxLogits.fc = nn.Linear(768, 1)
        inception = True

    model.cuda()

    metrics_save = metrics_code.metrics_save

    # Comet experiment used by evaluate() for logging, mirroring the
    # training script in Example #8
    experiment = Experiment(api_key=params.comet_api,
                            project_name=params.comet_name,
                            workspace="ayeaton")

    logging.info("Starting evaluation")

    # Reload weights from the saved file
    utils.load_checkpoint(
        os.path.join(args.model_dir, args.restore_file + '.pth.tar'), model)

    # Evaluate
    test_metrics = evaluate(model, test_loader, metrics_save, experiment,
                            inception)
    save_path = os.path.join(args.model_dir,
                             "metrics_test_{}.json".format(args.restore_file))
    utils.save_dict_to_json(test_metrics, save_path)
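Every snippet on this page funnels its hyperparameters through utils.Params, whose definition is not part of this listing. The following is a minimal sketch, assuming the common CS230-style implementation in which the JSON keys become instance attributes (which is why the examples can read params.batch_size and freely assign new attributes such as params.cuda):

import json


class Params():
    """Loads hyperparameters from a json file as instance attributes.

    Example:
        params = Params(json_path)
        print(params.learning_rate)
        params.learning_rate = 0.5  # change the value of learning_rate
    """

    def __init__(self, json_path):
        with open(json_path) as f:
            # Each top-level key in the json file becomes an attribute
            self.__dict__.update(json.load(f))

    def save(self, json_path):
        # Persist the current attributes, including any added after loading
        with open(json_path, 'w') as f:
            json.dump(self.__dict__, f, indent=4)

    @property
    def dict(self):
        # Dict-style access: params.dict['learning_rate']
        return self.__dict__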
Example #2
    def __init__(self, config="./config/reader.json"):
        params = utils.Params(config)
        self._path = params.catalogpath

        def _load_table(filename):
            # Open the FITS file read-only and return the data of its first
            # extension; memmap/lazy_load_hdus avoid reading the whole file
            # into memory up front
            return fits.open(self._path + filename,
                             mode='readonly',
                             memmap=True,
                             lazy_load_hdus=True)[1].data

        self.data = {
            "catalog": _load_table(params.catalogName),
            "members": _load_table(params.memberName),
            "areaZ": _load_table(params.areaZ),
            "random": _load_table(params.random),
        }
        # Column names available in each table
        self.quantities = {item: self.data[item].names for item in self.data}
Example #3
	def __init__(self):
		params = utils.Params(_json_file)
		self.model = HCN.HCN(**params.model_args)
		self.optimizer = optim.Adam(
			filter(lambda p: p.requires_grad, self.model.parameters()),
			lr=params.lr, betas=(0.9, 0.999), eps=1e-8,
			weight_decay=params.weight_decay)
		out_channel = params.model_args['out_channel']
		window_size = params.model_args['window_size']
		self.model.fc7 = nn.Sequential(
			nn.Linear((out_channel * 4)*(window_size//16)*(window_size//16), 256), 
			nn.ReLU(),
			nn.Dropout2d(p=0.5))
		self.model.fc8 = nn.Linear(256, 12)

		checkpoint = utils.load_checkpoint(_chkp_path, self.model, self.optimizer)

		self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
		self.model = self.model.to(self.device)
		self.model.eval()
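Examples #1 and #3 restore saved weights through utils.load_checkpoint, which this listing never shows. A minimal sketch, assuming the CS230-style helper; the checkpoint key names ('state_dict', 'optim_dict') are assumptions here:

import os

import torch


def load_checkpoint(checkpoint_path, model, optimizer=None):
    """Loads model parameters (and optionally optimizer state) from a file."""
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint_path))
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine
    ckpt = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(ckpt['state_dict'])
    if optimizer:
        optimizer.load_state_dict(ckpt['optim_dict'])
    return ckpt

Example #3 keeps the return value in checkpoint, which under this sketch would also expose any bookkeeping fields (such as the epoch) that the training script stored.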
Example #4
    # Load the parameters from json file
    args = parser.parse_args()
    experiment_path = os.path.join(args.model_dir, 'experiments',
                                   args.dataset_name,
                                   args.model_name + args.num)
    if not os.path.isdir(experiment_path):
        os.makedirs(experiment_path)

    json_file = os.path.join(experiment_path, 'params.json')
    if not os.path.isfile(json_file):
        print("No json configuration file found at {}".format(json_file))
        with open(json_file, 'w') as f:
            # Write an empty but valid json object so utils.Params can load it
            f.write("{}")
        print('successfully made file: {}'.format(json_file))

    params = utils.Params(json_file)

    if args.load:
        print("args.load=", args.load)
        if args.load_model:
            params.restore_file = args.load_model
        else:
            params.restore_file = experiment_path + '/checkpoint/best.pth.tar'

    params.dataset_dir = args.dataset_dir
    params.dataset_name = args.dataset_name
    params.model_version = args.model_name
    params.experiment_path = experiment_path
    params.mode = args.mode
    # a gpu_id of -1 conventionally selects the CPU
    if params.gpu_id >= 0:
        params.cuda = True
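Because Params (see the sketch after Example #1) keeps everything in the instance __dict__, the assignments above (params.dataset_dir = args.dataset_dir, etc.) only add attributes in memory; nothing reaches disk unless it is written back. A short usage sketch of the round trip, relying on the hypothetical save method from that earlier sketch:

params = utils.Params(json_file)

# Merge command-line overrides into the in-memory hyperparameters
params.dataset_dir = args.dataset_dir
params.mode = args.mode

# Persist the merged configuration alongside the experiment
params.save(os.path.join(experiment_path, 'params.json'))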
Example #5
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v)
                                for k, v in metrics_mean.items())
    logging.info("- Eval metrics : " + metrics_string)
    return metrics_mean


if __name__ == '__main__':
    # Evaluate the model on the test set.
    # Load the parameters from json file
    args = parser.parse_args()
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(json_path), \
        "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # use GPU if available
    params.cuda = torch.cuda.is_available()

    # Set the random seed for reproducible experiments
    torch.manual_seed(230)
    if params.cuda:
        torch.cuda.manual_seed(230)

    # Get the logger
    utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")
        logging.info("-"*20)

        early_stopping(val_loss_source)
        if early_stopping.early_stop:
            logging.info("Early stopping")
            break

if __name__ == '__main__':

    # Load the parameters from json file
    args = parser.parse_args()

    json_path = os.path.join(args.model_dir_source, 'params.json')
    assert os.path.isfile(json_path), \
        "No json configuration file found at {}".format(json_path)
    params_source = utils.Params(json_path)

    json_path = os.path.join(args.model_dir_target, 'params.json')
    assert os.path.isfile(json_path), \
        "No json configuration file found at {}".format(json_path)
    params_target = utils.Params(json_path)

    json_path = os.path.join(args.model_dir_transfer, 'params.json')
    assert os.path.isfile(json_path), \
        "No json configuration file found at {}".format(json_path)
    params_transfer = utils.Params(json_path)

    ckpt_filename = "checkpoint.tar"
    best_ckpt_filename = "model_best.tar"
    writer = SummaryWriter(args.tensorboard_dir)
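Examples #1, #5, and #8 all call utils.set_logger so that every subsequent logging.info line reaches both the terminal and a log file inside the model directory. The helper is not reproduced in this listing; a minimal sketch, assuming the CS230-style implementation:

import logging


def set_logger(log_path):
    """Log to the terminal and to `log_path` at the same time."""
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    if not logger.handlers:
        # File handler: timestamped lines in e.g. model_dir/evaluate.log
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logger.addHandler(file_handler)

        # Stream handler: plain messages on the console
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(stream_handler)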
Example #7
# Clear terminal with ANSI <ESC> c "\033c"
# print("\033c", end="") # (Doesn't fully clear screen on PC)
print("\033[H\033[2J", end="")

# Initialize paths to json parameters
data_path = Path().absolute() / "data"
model_path = Path().absolute() / "experiments/train/"
pretrain_path = Path().absolute() / "experiments/pretrain/"
json_path = model_path / "params.json"
json_path_pretrain = pretrain_path / "params.json"

# Load params json
assert json_path.is_file(), \
    f"\n\nERROR: No params.json file found at {json_path}\n"
assert json_path_pretrain.is_file(), \
    f"\n\nERROR: No params.json file found at {json_path_pretrain}\n"
params = utils.Params(json_path)
pretrain_params = utils.Params(json_path_pretrain)


# Record GPU availability on the params object
params.cuda = torch.cuda.is_available()

# Set random seed
torch.manual_seed(42)
if params.cuda:
    torch.cuda.manual_seed(42)
    # Update num_workers to 2 if running on GPU
    params.num_workers = 2


def train(dataloader,
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--params', help='Directory containing params.json')

    args = parser.parse_args()
    params = utils.Params(args.params)

    # pull out lr and decay for easy access
    learning_rate = params.learning_rate
    decay = params.decay

    # Set the random seed for reproducible experiments
    torch.manual_seed(230)

    experiment = Experiment(api_key=params.comet_api,
                            project_name=params.comet_name,
                            workspace="ayeaton")

    # Set the logger
    utils.set_logger(os.path.join(params.implementation_dir, 'train.log'))
    logging.info(experiment)

    log_params = {
        "learning_rate": learning_rate,
        "decay": decay,
        "batch_size": params.batch_size,
        "dropout_rate": params.dropout_rate,
        "model": params.model,
        "optimizer": params.optimizer,
        "loss_func": params.loss_func,
        "classes": params.classes,
        "metadata_file": params.metadata_file,
        "model_dir": params.model_dir,
        "implementation_dir": params.implementation_dir
    }
    experiment.log_parameters(log_params)

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # get data
    train_dataset = dataset(file_path=params.metadata_file,
                            split="Train",
                            classes=params.classes)

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=params.batch_size,
                              shuffle=True,
                              num_workers=8)

    val_dataset = dataset(file_path=params.metadata_file,
                          split="Val",
                          classes=params.classes)

    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=params.batch_size,
                            shuffle=True,
                            num_workers=8)

    logging.info("- done.")

    # Define the model and optimizer
    if params.model != "Inception":
        net = importlib.import_module("models.{}".format(params.model))
        model = net.Net()
        inception = False
    else:
        model = models.inception_v3(pretrained=False)
        model.fc = nn.Linear(2048, len(params.classes))
        model.AuxLogits.fc = nn.Linear(768, 1)
        inception = True

    logging.info("Model -- {}".format(repr(model)))

    model.cuda()

    # fetch loss function and metrics
    metrics_save = metrics_code.metrics_save

    # Train the model
    logging.info("Starting training for {} epoch(s)".format(params.num_epochs))

    train_and_evaluate(model, train_loader, val_loader, metrics_save,
                       params.implementation_dir, params.num_epochs,
                       params.loss_func, params.optimizer, learning_rate,
                       decay, params.save_summary_steps, experiment, inception)
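Example #1 persists its test metrics with utils.save_dict_to_json, and the metrics_mean dict logged in Example #5 is the kind of payload it receives. A minimal sketch, assuming the CS230-style helper that casts values to float so numpy and torch scalars serialize cleanly:

import json


def save_dict_to_json(d, json_path):
    """Saves a dict of float-castable values to a json file."""
    with open(json_path, 'w') as f:
        # json can't serialize numpy/torch scalars, so cast to plain float
        d = {k: float(v) for k, v in d.items()}
        json.dump(d, f, indent=4)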