示例#1
0
# for commit 300052df3430228ef5e8bc55e46845d95d5e57f0

from config.easy_dict import EasyDict

# Training configuration for the "encoded_cheby_highway" graph network.
config = EasyDict()

## Model
config.model_name = "encoded_cheby_highway"
config.model_kwargs = dict(
    k=3,
    layers=20,
    graph_w=256,
    concat_layers=[3, 5, 7, 10, 13, 16, 20],
    lin_ws=[64, 64],
)

## Data locations
config.data_path = "/data/WatChMaL/data/IWCDmPMT_4pi_fulltank_test/split_h5/IWCDmPMT_4pi_fulltank_test_graphnet_trainval.h5"
config.indices_file = "/data/WatChMaL/data/IWCDmPMT_4pi_fulltank_test/split_h5/IWCDmPMT_4pi_fulltank_test_graphnet_trainval_idxs.npz"
config.edge_index_pickle = "/data/WatChMaL/graphnets/visualization/mpmt_edges_dict.pkl"

## Output directory — one dump folder per model name
config.dump_path = "/data/WatChMaL/graphnets/GraphNets/dump/" + config.model_name

## Hardware
config.num_data_workers = 0  # worker processes sometimes crash; keep at 0
config.device = 'gpu'
config.gpu_list = [6]

## Optimizer
config.optimizer = "SGD"
config.optimizer_kwargs = {
    "lr": 0.01,
    "weight_decay": 1e-3,
    "momentum": 0.9,
    "nesterov": True
示例#2
0
from config.easy_dict import EasyDict

# Training configuration for PointNet (no-time feature set, Adam optimizer).
config = EasyDict()

config.cols_to_use = [0, 1, 2, 3]
# may want to change: feat_size, layer_dims, etc.
config.model_kwargs = dict(num_features=1, num_classes=3)

## Data locations
config.data_path = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval.h5"
config.indices_file = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval_idxs.npz"

# make sure to change this before starting a new run
config.dump_path = "/home/dgreen/training_outputs/pointnet2/no_time/adam/"

## Hardware
config.num_data_workers = 0  # worker processes sometimes crash; keep at 0
config.device = 'cuda:6'

## Optimizer
config.optimizer = "Adam"
config.optimizer_kwargs = dict(lr=1e-3, betas=(0.9, 0.999))

## Scheduler (disabled)
config.use_scheduler = False
config.scheduler_kwargs = dict(mode="min", min_lr=1e-6, patience=1, verbose=True)
config.scheduler_step = 190

## Training loop
config.batch_size = 32
config.epochs = 20

## Reporting / validation cadence
config.report_interval = 200
config.num_val_batches = 256
config.valid_interval = 1000

config.validate_batch_size = 32
示例#3
0
# for commit 3277f51e257c94e2ce98545bfd5115b29

from config.easy_dict import EasyDict

# Training configuration for the Kipf-style GCN baseline.
config = EasyDict()

## Model
config.model_name = "gcn_kipf"
config.model_kwargs = dict(w1=4, w2=8, w3=16)

## Data paths
config.data_path = "/app/test_data/split_h5/IWCDmPMT_4pi_fulltank_test_graphnet_trainval.h5"
config.indices_file = "/app/test_data/split_h5/IWCDmPMT_4pi_fulltank_test_graphnet_trainval_idxs.npz"
config.edge_index_pickle = "/app/GraphNets/metadata/edges_dict.pkl"

## Log location
config.dump_path = "/app/GraphNets/dump/gcn"

## Computer Parameters
config.num_data_workers = 0  # worker processes sometimes crash; keep at 0
config.device = 'gpu'
config.gpu_list = [0]

# Optimizer Parameters
config.optimizer = "Adam"
config.optimizer_kwargs = dict(lr=0.01, weight_decay=5e-4)

## Training parameters
config.batch_size = 32
config.epochs = 1
from config.easy_dict import EasyDict

# Training configuration for PointNet (time feature included, Adam optimizer).
config = EasyDict()

config.model_name = "Pointnet"

config.cols_to_use = [0, 1, 2, 3, 4]
# may want to change: feat_size, layer_dims, etc.
config.model_kwargs = dict(
    in_channels=len(config.cols_to_use),  # one input channel per selected column
    num_classes=3,
)

## Data locations
config.data_path = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval.h5"
config.indices_file = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval_idxs.npz"

# make sure to change this before starting a new run
config.dump_path = "/home/dgreen/training_outputs/pointnet/time/adam/"

## Hardware
config.num_data_workers = 0  # worker processes sometimes crash; keep at 0
config.device = 'cuda:7'

## Optimizer
config.optimizer = "Adam"
config.optimizer_kwargs = dict(lr=1e-3, betas=(0.9, 0.999))

## Scheduler (disabled)
config.use_scheduler = False
config.scheduler_kwargs = dict(mode="min", min_lr=1e-6, patience=1, verbose=True)
config.scheduler_step = 190

## Training loop
config.batch_size = 32
config.epochs = 20

config.report_interval = 200
示例#5
0
# for commit 300052df3430228ef5e8bc55e46845d95d5e57f0

from config.easy_dict import EasyDict

# Training configuration for the "cheby_batch_topk" graph network.
config = EasyDict()

## Model
config.model_name = "cheby_batch_topk"
config.model_kwargs = dict(layers=3, graph_w=128, lin_ws=[32, 8], k=3)

## Data locations
config.data_path = "/fast_scratch/IWCDmPMT_4pi_fulltank_9M_graphnet_trainval.h5"
config.indices_file = "/fast_scratch/IWCDmPMT_4pi_fulltank_9M_graphnet_trainval_idxs.npz"
config.edge_index_pickle = "/project_dir/visualization/edges_dict.pkl"

## Output directory — one dump folder per model name
config.dump_path = "/project_dir/dump/" + config.model_name

## Hardware
config.num_data_workers = 0  # worker processes sometimes crash; keep at 0
config.device = 'gpu'
config.gpu_list = [0]

## Optimizer
config.optimizer = "SGD"
config.optimizer_kwargs = dict(
    lr=0.01,
    weight_decay=1e-3,
    momentum=0.9,
    nesterov=True,
)

config.scheduler_kwargs = {
    "mode": "min",
    "min_lr": 1e-6,
    "patience": 10,
示例#6
0
# for commit 300052df3430228ef5e8bc55e46845d95d5e57f0

from config.easy_dict import EasyDict

# Training configuration for the Kipf-style GCN on the neutron dataset.
config = EasyDict()

## Model
config.model_name = "gcn_kipf"
config.model_kwargs = dict(w1=10, w2=12, w3=8)

## Data paths
config.data_path = "/fast_scratch/NeutronGNN/iwcd_mpmt_shorttank_neutrongnn_trainval.h5"
config.indices_file = "/fast_scratch/NeutronGNN/iwcd_mpmt_shorttank_neutrongnn_trainval_idxs.npz"

## Log location — one dump folder per model name
config.dump_path = "/fast_scratch/NeutronGNN/dump/" + config.model_name

## Computer parameters
config.num_data_workers = 0  # worker processes sometimes crash; keep at 0
config.device = 'gpu'
config.gpu_list = [1]

## Optimizer parameters
config.optimizer = "Adam"
config.optimizer_kwargs = dict(lr=1e-3)
# alternative previously tried: {"lr": 0.01, "weight_decay": 5e-4}

## Scheduler parameters
config.scheduler_kwargs = dict(mode="min", min_lr=1e-6, patience=1, verbose=True)
config.scheduler_step = 190
示例#7
0
from config.easy_dict import EasyDict

# Training configuration for PointNet (time feature set, Adam optimizer).
config = EasyDict()

config.model_name = "Pointnet"

config.cols_to_use = [0, 1, 2, 3, 4]
# may want to change: feat_size, layer_dims, etc.
config.model_kwargs = dict(in_features=2, num_classes=3)

## Data locations
config.data_path = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval.h5"
config.indices_file = "/fast_scratch/WatChMaL/data/pointnet/pointnet_trainval_idxs.npz"

# make sure to change this before starting a new run
config.dump_path = "/home/dgreen/training_outputs/pointnet2/time/adam/"

## Hardware
config.num_data_workers = 0  # worker processes sometimes crash; keep at 0
config.device = 'cuda:7'

## Optimizer
config.optimizer = "Adam"
config.optimizer_kwargs = dict(lr=1e-3, betas=(0.9, 0.999))

## Scheduler (disabled)
config.use_scheduler = False
config.scheduler_kwargs = dict(
    mode="min",
    min_lr=1e-6,
    patience=1,
    verbose=True,
)
config.scheduler_step = 190