"enc3": torch.ones(128, 128),
        "enc4": torch.ones(8, 128),
        "dec1": torch.ones(128, 8),
        "dec2": torch.ones(128, 128),
        "dec3": torch.ones(128, 128),
        "dec4": torch.ones(128, 128),
        "dout": torch.ones(640, 128),
    }
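    # Hedged sketch (not from the original source): how a mask dict like the one
    # above is typically consumed. Assumes each key names a Linear layer whose
    # weight tensor has the same shape as its mask; all-ones masks keep every weight.
    def apply_prune_masks(model, mask_set):
        for name, module in model.named_modules():
            if name in mask_set and hasattr(module, "weight"):
                # elementwise multiply: 1 keeps a weight, 0 zeroes (prunes) it
                module.weight.data.mul_(mask_set[name])
        return model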
    # Setup cuda
    use_cuda = False  # torch.cuda.is_available(); CUDA isn't needed for this step, so force CPU
    device = torch.device("cuda:0" if use_cuda else "cpu")
    torch.backends.cudnn.benchmark = True

    # Load datasets
    #yrdy_dataset = jet_dataset.ParticleJetDataset('train_data/train/', yamlConfig)
    test_dataset = jet_dataset.ParticleJetDataset(options.test, yamlConfig)
    test_labels = test_dataset.labels_list
    #train_loader = torch.utils.data.DataLoader(full_dataset, batch_size=10000,
    #                                         shuffle=True, num_workers=0)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=25000,
                                              shuffle=False,
                                              num_workers=0)

    dir = "model_files/"
    dir = options.model_files

    #
    # try:
    #     if options.batnorm:
    #         loadmodel = models.three_layer_model_batnorm_masked(prune_mask_set, bn_affine=options.bn_affine,
Example #2

import hls4ml


## Load yaml config
def parse_config(config_file):
    print("Loading configuration from", config_file)
    # context manager ensures the config file is closed after loading
    with open(config_file, 'r') as config:
        return yaml.load(config, Loader=yaml.FullLoader)


yamlConfig = parse_config("yamlConfig.yml")

# Setup test data set
test_dataset = jet_dataset.ParticleJetDataset("./train_data/test/", yamlConfig)
test_size = len(test_dataset)

print("test dataset size: " + str(len(test_dataset)))

test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=test_size,
                                          shuffle=False,
                                          num_workers=10,
                                          pin_memory=True)

X_test = None
y_test = None

# batch_size equals the dataset size, so this loop runs once and grabs
# the whole test set as numpy arrays
for i, data in enumerate(test_loader, 0):
    X_test, y_test = data[0].numpy(), data[1].numpy()
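
# Hedged sketch of a plausible next step given the hls4ml import above; nothing
# below is from the original source. Assumes the documented hls4ml PyTorch entry
# point convert_from_pytorch_model, a 16-feature input (typical for this jet
# dataset), and a placeholder output dir.
def to_hls(model, X, output_dir="hls4ml_prj"):
    hls_model = hls4ml.converters.convert_from_pytorch_model(
        model, (None, 16), output_dir=output_dir)  # (None, 16): batch dim + assumed feature count
    hls_model.compile()            # build the C-simulation library
    return hls_model.predict(X)    # run inference on the numpy test set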
Example #3
    # Setup cuda
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    print("Using Device: {}".format(device))
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.fastest = True  # note: not a documented cudnn flag; effectively a no-op
    if use_cuda:
        print("cuda:0 device type: {}".format(torch.cuda.get_device_name(0)))

    # Set Batch size and split value
    batch_size = 1024
    train_split = 0.75

    # Setup and split dataset
    full_dataset = jet_dataset.ParticleJetDataset(options.inputFile,
                                                  yamlConfig)
    test_dataset = jet_dataset.ParticleJetDataset(options.test, yamlConfig)
    train_size = int(train_split * len(full_dataset))  # 75% train / 25% validation

    val_size = len(full_dataset) - train_size
    test_size = len(test_dataset)

    num_val_batches = math.ceil(val_size / batch_size)
    num_train_batches = math.ceil(train_size / batch_size)
    print("train_batches " + str(num_train_batches))
    print("val_batches " + str(num_val_batches))

    train_dataset, val_dataset = torch.utils.data.random_split(full_dataset, [train_size, val_size])
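
    # Plausible continuation (a sketch, not in the original source): wrap the
    # splits in DataLoaders using the batch_size chosen above; the loader names
    # are assumptions.
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                               shuffle=True, num_workers=0)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size,
                                             shuffle=False, num_workers=0)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
                                              shuffle=False, num_workers=0)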
Example #4

        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.fastest = True  # note: not a documented cudnn flag; effectively a no-op

    # Set Batch size and split value
    batch_size = 1024

    if options.fold is None:  # no fold given: load one folder and randomly split into train/val
        train_split = 0.75

        # Setup and split dataset
        full_dataset = jet_dataset.ParticleJetDataset(options.inputFile,yamlConfig)

        train_size = int(train_split * len(full_dataset))  # 75% train / 25% validation

        val_size = len(full_dataset) - train_size

        num_val_batches = math.ceil(val_size/batch_size)
        num_train_batches = math.ceil(train_size/batch_size)
        print("train_batches " + str(num_train_batches))
        print("val_batches " + str(num_val_batches))

        train_dataset, val_dataset = torch.utils.data.random_split(full_dataset,[train_size,val_size])
    else:
        train_filenames = []
        val_filename = ""