Example 1
    def __init__(self, config):
        super().__init__(config)

        self.smt_loader = SMTDataLoader(config)
        labels = self.smt_loader.train_loader.dataset.data.y.numpy()
        class_weights = calculate_class_weights(labels=labels)
        self.class_weights = torch.from_numpy(class_weights.astype(np.float32))
        self.loss = nn.NLLLoss(weight=self.class_weights)

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info("WARNING: You have a CUDA device, " +
                             "so you should probably enable it!")
        self.cuda = self.is_cuda & self.config.cuda

        # set the manual seed for torch
        self.manual_seed = self.config.seed
        if self.cuda:
            torch.cuda.manual_seed(self.manual_seed)
            self.device = torch.device('cuda')
            self.loss = self.loss.to(self.device)
            self.class_weights = self.class_weights.to(self.device)
            self.logger.info("Program will run on *****GPU-CUDA*****")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU*****\n")
Example 2
    def __init__(self, config):
        super().__init__(config)
        # Create an instance from the model
        self.model = ConvTasNet(self.config.model)
        # Create an instance from the data loader
        self.data_loader = Musdb18DB(self.config.data)
        self.cuda = config.cuda

        if self.cuda:
            torch.cuda.manual_seed_all(self.config.seed)
            self.device = torch.device("cuda")
            #torch.cuda.set_device(self.config.gpu_device)
            #torch.cuda.set_device(4)
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()

        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.config.seed)
            self.logger.info("Operation will be on *****CPU***** ")

        self.model = self.model.to(self.device)

        self.current_epoch = 0
        self.current_iteration = 0
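
Note: every example imports print_cuda_statistics from the surrounding project; its body is not part of these snippets. A rough stand-in, assuming it only logs version and device information (the actual helper may report more):

import logging
import torch

def print_cuda_statistics():
    logger = logging.getLogger("Cuda Statistics")
    logger.info("__pyTorch VERSION: %s", torch.__version__)
    logger.info("__CUDA available: %s", torch.cuda.is_available())
    if torch.cuda.is_available():
        logger.info("__Number of CUDA devices: %d", torch.cuda.device_count())
        logger.info("__Current CUDA device: %d", torch.cuda.current_device())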
Example 3
    def __init__(self, config):
        #setup CUDA, seeds, logger...
        self.config = config
        self.logger = logging.getLogger(config.exp_name)
        self.logger.info("Creating architecture...")

        np.random.seed(0)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.manual_seed(0)

        self.data_loader = None #TODO define in children

        self.current_iteration = 0
        self.cuda = torch.cuda.is_available() & self.config.cuda

        if self.cuda:
            self.device = torch.device("cuda")
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            self.logger.info("Operation will be on *****CPU***** ")

        self.writer = SummaryWriter(log_dir=self.config.summary_dir)
Example 4
    def __init__(self, config):
        super().__init__(config)
        # Create an instance from the Model
        self.logger.info("Loading encoder pretrained in imagenet...")
        if self.config.pretrained_encoder:
            pretrained_enc = torch.nn.DataParallel(
                ERFNet(self.config.imagenet_nclasses)).cuda()
            pretrained_enc.load_state_dict(
                torch.load(self.config.pretrained_model_path)['state_dict'])
            pretrained_enc = next(pretrained_enc.children()).features.encoder
        else:
            pretrained_enc = None
        # define erfNet model
        self.model = ERF(self.config, pretrained_enc)
        # Create an instance from the data loader
        self.data_loader = VOCDataLoader(self.config)
        # Create instance from the loss
        self.loss = CrossEntropyLoss(self.config)
        # Create instance from the optimizer
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=self.config.learning_rate,
            betas=(self.config.betas[0], self.config.betas[1]),
            eps=self.config.eps,
            weight_decay=self.config.weight_decay)
        # Define Scheduler
        lambda1 = lambda epoch: pow(
            (1 - ((epoch - 1) / self.config.max_epoch)), 0.9)
        self.scheduler = lr_scheduler.LambdaLR(self.optimizer,
                                               lr_lambda=lambda1)
        # initialize my counters
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_valid_mean_iou = 0

        # Check if CUDA is available
        self.is_cuda = torch.cuda.is_available()
        # Enable CUDA only if it is available and requested in the config
        self.cuda = self.is_cuda & self.config.cuda

        if self.cuda:
            torch.cuda.manual_seed_all(self.config.seed)
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()

        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.config.seed)
            self.logger.info("Operation will be on *****CPU***** ")

        self.model = self.model.to(self.device)
        self.loss = self.loss.to(self.device)
        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)

        # Tensorboard Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='FCN8s')
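
Note: the LambdaLR above multiplies the base learning rate by a polynomial decay factor each epoch. Purely as an illustration (max_epoch comes from the config; 100 is assumed here):

max_epoch = 100  # assumed value for illustration; the example reads it from config.max_epoch
poly = lambda epoch: pow(1 - (epoch - 1) / max_epoch, 0.9)
print(poly(1), poly(50), poly(100))  # 1.0 at the first epoch, ~0.55 mid-way, ~0.016 at the end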
Example 5
    def __init__(self, config):
        self.config = config
        self.logger = logging.getLogger("STGAN")
        self.logger.info("Creating STGAN architecture...")

        self.G = Generator(len(self.config.attrs),
                           self.config.g_conv_dim,
                           self.config.g_layers,
                           self.config.shortcut_layers,
                           use_stu=self.config.use_stu,
                           one_more_conv=self.config.one_more_conv)
        self.D = Discriminator(self.config.image_size, len(self.config.attrs),
                               self.config.d_conv_dim, self.config.d_fc_dim,
                               self.config.d_layers)

        self.data_loader = globals()['{}_loader'.format(self.config.dataset)](
            self.config.data_root, self.config.att_list_file, self.config.mode,
            self.config.attrs, self.config.crop_size, self.config.image_size,
            self.config.batch_size)

        self.current_iteration = 0
        self.cuda = torch.cuda.is_available() & self.config.cuda

        if self.cuda:
            self.device = torch.device("cuda")
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            self.logger.info("Operation will be on *****CPU***** ")

        self.writer = SummaryWriter(log_dir=self.config.summary_dir)
Example 6
    def __init__(self, config):
        super().__init__(config)

        # Set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info("WARNING: You have a CUDA device - enable it!")
        self.cuda = self.is_cuda & self.config.cuda

        # Set the manual seed for torch
        if self.cuda:
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            torch.cuda.manual_seed_all(self.config.seed)
            self.logger.info("Program will run on ***GPU-CUDA***")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.config.seed)
            self.logger.info("Program will run on ***CPU***")

        # Define model
        self.model = PCN(self.config)
        self.model = self.model.to(self.device)

        # Define dataloader
        self.train_dataloader = ShapeNetPointCloudDataLoader(
            self.config, dataset_mode='train')
        self.validate_dataloader = ShapeNetPointCloudDataLoader(
            self.config, dataset_mode='valid')

        # Define optimizer and scheduler
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=self.config.learning_rate,
            eps=1e-04,
            weight_decay=self.config.weight_decay)
        self.scheduler = torch.optim.lr_scheduler.ExponentialLR(
            self.optimizer, 0.97)

        # Define criterion
        self.criterion = ChamferDistance()
        self.criterion = self.criterion.to(self.device)

        # Initialize counter
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_valid_mean_loss = 1.0  # The best loss will be far less than 1.0

        # Load model from the latest checkpoint.
        # If none can be found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)

        # Visualization in visdom during training
        self.vis = visdom.Visdom()

        # Summary Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='PCN')
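
Note: ChamferDistance is a project-defined criterion used to compare predicted and target point clouds. A minimal dense version, for illustration only and assuming point clouds of shape (B, N, 3) and (B, M, 3), can be built on torch.cdist:

import torch

def chamfer_distance(pred, target):
    # pred: (B, N, 3), target: (B, M, 3)
    dists = torch.cdist(pred, target)                 # (B, N, M) pairwise Euclidean distances
    pred_to_target = dists.min(dim=2).values.mean()   # each predicted point to its nearest target point
    target_to_pred = dists.min(dim=1).values.mean()   # each target point to its nearest predicted point
    return pred_to_target + target_to_pred

PCN-style implementations usually rely on a CUDA extension for speed, so treat this only as a reference for what the loss computes.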
Example 7
    def __init__(self, config):
        self.config = config

        self.logger = logging.getLogger("DQNAgent")

        # define models (policy and target)
        self.policy_model = DQN(self.config)
        self.target_model = DQN(self.config)

        # define memory
        self.memory = ReplayMemory(self.config)

        # define loss
        self.loss = HuberLoss()

        # define optimizer
        self.optim = torch.optim.RMSprop(self.policy_model.parameters())

        # define environment
        self.env = gym.make('CartPole-v0').unwrapped
        self.cartpole = CartPoleEnv(self.config.screen_width)

        # initialize counter
        self.current_episode = 0
        self.current_iteration = 0
        self.episode_durations = []

        self.batch_size = self.config.batch_size

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        if self.cuda:
            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
        else:
            self.logger.info("Program will run on *****CPU***** ")
            self.device = torch.device("cpu")

        self.policy_model = self.policy_model.to(self.device)
        self.target_model = self.target_model.to(self.device)
        self.loss = self.loss.to(self.device)

        # Initialize Target model with policy model state dict
        self.target_model.load_state_dict(self.policy_model.state_dict())
        self.target_model.eval()

        # Summary Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='DQN')
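
Note: HuberLoss is a project-local criterion in this example. A minimal sketch, assuming it simply wraps PyTorch's smooth L1 (Huber) loss as an nn.Module so it can be moved to a device like any other criterion:

import torch.nn as nn
import torch.nn.functional as F

class HuberLoss(nn.Module):
    # Quadratic for small errors, linear for large ones; a common choice for DQN TD errors.
    def forward(self, input, target):
        return F.smooth_l1_loss(input, target)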
Example 8
    def __init__(self, config):
        super().__init__(config)

        self.net = Discriminator(self.config)  # Segmentation network
        if config.phase == 'testing':
            self.testloader = Supervised_Dataset(self.config, "testing")
        else:
            self.trainloader = Supervised_Dataset(self.config, "training")
            self.valloader = Supervised_Dataset(self.config, "validating")

        # optimizer
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          lr=self.config.learning_rate,
                                          betas=(self.config.beta1,
                                                 self.config.beta2))

        # counter initialization
        self.current_epoch = 0
        self.best_validation_dice = 0
        self.current_iteration = 0

        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        if self.cuda:
            self.net = self.net.cuda()

        class_weights = torch.tensor([0.33, 1.5, 0.83, 1.33])  # one weight per class (1-D, size C)
        if self.cuda:
            class_weights = class_weights.cuda()
        self.criterion = nn.CrossEntropyLoss(weight=class_weights)

        # set the manual seed for torch
        if not self.config.seed:
            self.manual_seed = random.randint(1, 10000)
        else:
            self.manual_seed = self.config.seed
        self.logger.info("seed: %d", self.manual_seed)
        random.seed(self.manual_seed)
        if self.cuda:
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            torch.cuda.manual_seed_all(self.manual_seed)
            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU***** ")

        if self.config.load_chkpt:
            self.load_checkpoint()
Example 9
    def __init__(self, config):
        super().__init__(config)
        print(torch.__version__)
        # define models
        self.model = DAFSL_CAEModel()
        summary(self.model,
                input_size=(3, self.config.image_size, self.config.image_size))

        # define loss
        self.loss = nn.MSELoss()  #nn.NLLLoss()

        # define optimizer
        self.optimizer = optim.RMSprop(self.model.parameters(),
                                       alpha=0.99,
                                       lr=self.config.learning_rate,
                                       eps=1e-08,
                                       weight_decay=0,
                                       momentum=self.config.momentum)
        #optim.SGD(self.model.parameters(), lr=self.config.learning_rate, momentum=self.config.momentum)

        # initialize counter
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_metric = 0
        self.best_valid_loss = 0
        self.fixed_noise = Variable(
            torch.randn(self.config.batch_size, 3, self.config.image_size,
                        self.config.image_size))

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        # set the manual seed for torch
        self.manual_seed = self.config.seed
        if self.cuda:
            torch.cuda.manual_seed(self.manual_seed)
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            self.model = self.model.to(self.device)
            self.loss = self.loss.to(self.device)

            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU*****\n")

        # Summary Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='DAFSL')
Example 10
    def __init__(self, config):
        super().__init__(config)

        # define models
        self.model = AntiSpoofing()

        # define data_loader
        self.data_loader = SiwDataLoader(config=config)

        # define loss
        self.loss = L1_loss()

        # define optimizer
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.config.learning_rate,
                                   momentum=self.config.momentum)

        # initialize counter
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_metric = 0

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        # set the manual seed for torch
        self.manual_seed = self.config.seed
        if self.cuda:
            torch.cuda.manual_seed(self.manual_seed)
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            self.model = self.model.to(self.device)
            self.loss = self.loss.to(self.device)

            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU*****\n")

        self.model = self.model.to(self.device)
        self.loss = self.loss.to(self.device)

        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)
        # Summary Writer
        self.summary_writer = None
Example 11
    def __init__(self, cfg):
        super().__init__(cfg)
        print_cuda_statistics()
        self.device = get_device()

        # define models
        self.model = None  # TODO: fill in the actual model

        # define data_loader: choose option 1 or option 2
        # 1: separate train / test datasets
        # tr_dataset = custom_dataset(cfg.tr_data_pth)
        # te_dataset = custom_dataset(cfg.te_data_pth)

        # 2: a single dataset split randomly
        # (requires dataset, train_size and test_size to be defined above)
        # dataset = custom_dataset(cfg.data_pth)
        tr_dataset, te_dataset = random_split(dataset, [train_size, test_size])

        self.tr_loader = DataLoader(tr_dataset,
                                    batch_size=cfg.bs,
                                    shuffle=cfg.data_shuffle,
                                    num_workers=cfg.num_w)
        self.te_loader = DataLoader(te_dataset,
                                    batch_size=cfg.bs,
                                    shuffle=cfg.data_shuffle,
                                    num_workers=cfg.num_w)

        # define criterion
        self.criterion = Loss()

        # define optimizer
        self.optimizer = None

        # initialize counter
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_metric = 0  # loss or accuracy or etc

        # set the manual seed for torch
        self.manual_seed = self.cfg.seed
        self.cuda = torch.cuda.is_available()  # was referenced but never set in the original
        if self.cuda:
            torch.cuda.manual_seed_all(self.manual_seed)
            torch.cuda.set_device(self.cfg.gpu_device)
            self.model = self.model.cuda()
            self.criterion = self.criterion.cuda()  # self.loss was undefined; the criterion is defined above
            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.logger.info("Program will run on *****CPU*****\n")

        # Load the model from the checkpoint named in cfg; if none is found, start from scratch.
        self.load_checkpoint(self.cfg.checkpoint_file)
        # Summary Writer
        self.summary_writer = None
Example 12
    def __init__(self, config):
        super().__init__(config)

        # set device
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda
        self.manual_seed = self.config.seed
        if self.cuda:
            self.device = torch.device("cuda")
            torch.cuda.manual_seed(self.manual_seed)
            torch.cuda.set_device(self.config.gpu_device)

            self.logger.info("Program will run on *****GPU-CUDA*****\n")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU*****\n")

        self.num_classes = self.config.num_classes

        self.model = None  # original model graph, loss function, optimizer, learning scheduler
        self.loss_fn = None
        self.optimizer = None
        self.scheduler = None

        self.data_loader = Cifar100DataLoader(
            config=self.config)  # data loader
        self.sub_data_loader = None  # sub data loader for sub task

        self.current_epoch = 0  # info for train
        self.current_iteration = 0
        self.best_valid_acc = 0

        self.cls_i = None
        self.channel_importance = dict()

        self.all_list = list()

        self.named_modules_list = dict()
        self.named_conv_list = dict()

        self.original_conv_output = dict()

        self.stayed_channels = dict()

        self.init_graph()
Example 13
    def __init__(self, config):
        super().__init__(config)

        # define data_loader
        self.data_loader = TextDataLoader(config)
        
        # define models
        self.model = Text_Encoder(config)

        # define loss
        self.loss = nn.NLLLoss()

        # define optimizer
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.learning_rate)
        # Define Scheduler
        #lambda1 = lambda epoch: pow((1 - ((epoch - 1) / self.config.max_epoch)), 0.9)
        #self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda1)
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min')
        # initialize my counters
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_valid_accuracy = 0

        # Check if CUDA is available
        self.is_cuda = torch.cuda.is_available()
        # Enable CUDA only if it is available and requested in the config
        self.cuda = self.is_cuda & self.config.cuda

        if self.cuda:
            torch.cuda.manual_seed_all(self.config.seed)
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()

        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.config.seed)
            self.logger.info("Operation will be on *****CPU***** ")
        self.config.device = self.device
        self.model = self.model.to(self.device)
        self.loss = self.loss.to(self.device)
        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)

        # Tensorboard Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir)
Example 14
    def __init__(self, config):
        super().__init__(config)

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        # set the manual seed for torch
        self.manual_seed = self.config.seed
        if self.cuda:
            torch.cuda.manual_seed(self.manual_seed)
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)

            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU*****\n")
        domain_name = self.config.current_domain
        img_root_folder = self.config.discriminator_datasets_root_dir
        self.class_labels = os.listdir(
            os.path.join(img_root_folder, domain_name, "train"))
        # define models
        no_class_labels = len(self.class_labels)
        self.model = DAFSL_ConceptDiscriminatorModel(
            config=self.config, no_class_labels=no_class_labels)
        self.model = self.model.to(self.device)

        # define optimizer
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.config.learning_rate,
                                   momentum=self.config.momentum)

        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)
        # Summary Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='DAFSL_DCC')
Example 15
    def __init__(self, config):
        super().__init__(config)
        ## Select network
        if config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet" and config.mode != "measure_speed":
            from graphs.models.SGNet.SGNet import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet":
            from graphs.models.SGNet.SGNet_fps import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet_ASPP" and config.mode != "measure_speed":
            from graphs.models.SGNet.SGNet_ASPP import SGNet
        elif config.spatial_information == 'depth' and config.os == 16 and config.network == "SGNet_ASPP":
            from graphs.models.SGNet.SGNet_ASPP_fps import SGNet

        random.seed(self.config.seed)
        os.environ['PYTHONHASHSEED'] = str(self.config.seed)
        np.random.seed(self.config.seed)
        torch.manual_seed(self.config.seed)
        torch.cuda.manual_seed(self.config.seed)
        torch.cuda.manual_seed_all(self.config.seed)
        cudnn.enabled = True
        cudnn.benchmark = True
        cudnn.deterministic = True
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
        # create data loader
        if config.dataset == "NYUD":
            self.testloader = data.DataLoader(NYUDataset_val_full(
                self.config.val_list_path),
                                              batch_size=1,
                                              shuffle=False,
                                              pin_memory=True)
        # Create an instance from the Model
        self.logger.info("Loading encoder pretrained in imagenet...")
        self.model = SGNet(self.config.num_classes)
        print(self.model)

        self.model.cuda()
        self.model.train()
        self.model.float()
        print(config.gpu)
        if config.mode != 'measure_speed':
            self.model = DataParallelModel(self.model, device_ids=[0])
            print('parallel....................')

        total = sum([param.nelement() for param in self.model.parameters()])
        print('  + Number of params: %.2fM' % (total / 1e6))
        print_cuda_statistics()
Example 16
    def __init__(self, config):
        super().__init__(config)

        # define models
        self.model = None

        # define data_loader
        self.data_loader = None

        # define loss
        self.loss = None

        # define optimizers for both generator and discriminator
        self.optimizer = None

        # initialize counter
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_metric = 0

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        # set the manual seed for torch
        self.manual_seed = self.config.seed
        if self.cuda:
            torch.cuda.manual_seed_all(self.manual_seed)
            torch.cuda.set_device(self.config.gpu_device)
            self.model = self.model.cuda()
            self.loss = self.loss.cuda()
            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.logger.info("Program will run on *****CPU*****\n")

        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)
        # Summary Writer
        self.summary_writer = None
Example 17
    def __init__(self, config):
        self.config = config
        self.logger = logging.getLogger("CondenseNetAgent")
        # Create an instance from the Model
        self.model = CondenseNet(self.config)
        # Create an instance from the data loader
        self.data_loader = Cifar10DataLoader(self.config)
        # Create instance from the loss
        self.loss = CrossEntropyLoss2d()
        # Create instance from the optimizer
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.config.learning_rate,
                                         momentum=float(self.config.momentum),
                                         weight_decay=self.config.weight_decay,
                                         nesterov=True)
        # initialize my counters
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_valid_acc = 0
        # Check if CUDA is available
        self.is_cuda = torch.cuda.is_available()
        # Enable CUDA only if it is available and requested in the config
        self.cuda = self.is_cuda & self.config.cuda

        if self.cuda:
            self.device = torch.device("cuda")
            torch.cuda.manual_seed_all(self.config.seed)
            torch.cuda.set_device(self.config.gpu_device)
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.config.seed)
            self.logger.info("Operation will be on *****CPU***** ")

        self.model = self.model.to(self.device)
        self.loss = self.loss.to(self.device)
        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)
        # Tensorboard Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='CondenseNet')
Example 18
    def __init__(self, config):
        self.config = config
        self.logger = logging.getLogger("STGAN")
        self.logger.info("Creating STGAN architecture...")

        self.G = Generator(len(self.config.attrs),
                           self.config.g_conv_dim,
                           self.config.g_layers,
                           self.config.shortcut_layers,
                           use_stu=self.config.use_stu,
                           one_more_conv=self.config.one_more_conv)
        self.D = Discriminator(self.config.image_size, len(self.config.attrs),
                               self.config.d_conv_dim, self.config.d_fc_dim,
                               self.config.d_layers)

        self.data_loader = globals()['{}_loader'.format(self.config.dataset)](
            self.config.data_root, self.config.mode, self.config.attrs,
            self.config.crop_size, self.config.image_size, self.config.batch_size)

        self.current_iteration = 0
        self.cuda = torch.cuda.is_available() & self.config.cuda

        if self.cuda:
            self.device = torch.device("cuda")
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            self.logger.info("Operation will be on *****CPU***** ")

        self.writer = SummaryWriter(log_dir=self.config.summary_dir)
Example 19
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.logger = logging.getLogger("SHMAgent")
        self.logger.info(
            "Creating SHM architecture and loading pretrained weights...")

        self.model = SHM()
        self.data_loader = AdobeDIMDataLoader(self.config.data_root,
                                              self.config.mode,
                                              self.config.batch_size)
        self.current_epoch = 0
        self.cuda = torch.cuda.is_available() & self.config.cuda

        if self.cuda:
            self.device = torch.device("cuda")
            self.logger.info("Operation will be on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            self.logger.info("Operation will be on *****CPU***** ")

        self.writer = SummaryWriter(log_dir=self.config.summary_dir,
                                    comment='SHM')
Example 20
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.onlineExpert = ComputeECBSSolution(self.config)
        self.dataTransformer = DataTransformer(self.config)
        self.recorder = MonitoringMultiAgentPerformance(self.config)

        self.model = DecentralPlannerNet(self.config)
        self.logger.info("Model: \n".format(print(self.model)))

        # define data_loader
        self.data_loader = DecentralPlannerDataLoader(config=config)

        # define loss
        self.loss = CrossEntropyLoss()
        self.l1_reg = L1Regularizer(self.model)
        self.l2_reg = L2Regularizer(self.model)

        # define optimizers
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate,
                                    weight_decay=self.config.weight_decay)
        print(self.config.weight_decay)
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=self.config.max_epoch, eta_min=1e-6)

        # for param in self.model.parameters():
        #     print(param)

        # for name, param in self.model.state_dict().items():
        #     print(name, param)

        # initialize counter
        self.current_epoch = 0
        self.current_iteration = 0
        self.current_iteration_validStep = 0
        self.rateReachGoal = 0.0

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        # set the manual seed for torch
        self.manual_seed = self.config.seed
        if self.cuda:
            torch.cuda.manual_seed_all(self.manual_seed)
            self.config.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            self.model = self.model.to(self.config.device)
            self.loss = self.loss.to(self.config.device)
            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.config.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU*****\n")

        # Load the model from the latest checkpoint; if none is found, start from scratch.
        if self.config.train_TL or self.config.test_general:
            self.load_pretrained_checkpoint(self.config.test_epoch,
                                            lastest=self.config.lastest_epoch,
                                            best=self.config.best_epoch)
        else:
            self.load_checkpoint(self.config.test_epoch,
                                 lastest=self.config.lastest_epoch,
                                 best=self.config.best_epoch)
        # Summary Writer

        self.robot = multiRobotSim(self.config)
        self.switch_toOnlineExpert = False
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='NerualMAPP')
        self.plot_graph = True
        self.save_dump_input = False
        self.dummy_input = None
        self.dummy_gso = None
        self.time_record = None
Example 21
    def __init__(self, config):
        super().__init__(config)
        # define models ( generator and discriminator)
        self.netG = Generator(self.config)
        self.netD = Discriminator(self.config)
        # define dataloader
        self.dataloader = CelebADataLoader(self.config)

        # define loss
        self.loss = BinaryCrossEntropy()

        # define optimizers for both generator and discriminator
        self.optimG = torch.optim.Adam(self.netG.parameters(),
                                       lr=self.config.learning_rate,
                                       betas=(self.config.beta1,
                                              self.config.beta2))
        self.optimD = torch.optim.Adam(self.netD.parameters(),
                                       lr=self.config.learning_rate,
                                       betas=(self.config.beta1,
                                              self.config.beta2))

        # initialize counter
        self.current_epoch = 0
        self.current_iteration = 0
        self.best_valid_mean_iou = 0

        self.fixed_noise = Variable(
            torch.randn(self.config.batch_size, self.config.g_input_size, 1,
                        1))
        self.real_label = 1
        self.fake_label = 0

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda
        # set the manual seed for torch
        #if not self.config.seed:
        self.manual_seed = random.randint(1, 10000)
        #self.manual_seed = self.config.seed
        self.logger.info("seed: ", self.manual_seed)
        random.seed(self.manual_seed)
        if self.cuda:
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            torch.cuda.manual_seed_all(self.manual_seed)
            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.device = torch.device("cpu")
            torch.manual_seed(self.manual_seed)
            self.logger.info("Program will run on *****CPU***** ")

        self.netG = self.netG.to(self.device)
        self.netD = self.netD.to(self.device)
        self.loss = self.loss.to(self.device)
        self.fixed_noise = self.fixed_noise.to(self.device)
        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)

        # Summary Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment='DCGAN')
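
Note: BinaryCrossEntropy is another project-local wrapper. A minimal stand-in, assuming it just applies nn.BCELoss to the discriminator's sigmoid output (the original class may differ, e.g. by adding label smoothing):

import torch.nn as nn

class BinaryCrossEntropy(nn.Module):
    def __init__(self):
        super().__init__()
        self.bce = nn.BCELoss()

    def forward(self, output, label):
        # output: probabilities in [0, 1] from the discriminator; label: matching 0/1 targets
        return self.bce(output, label)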
Example 22
    def __init__(self, config):
        super().__init__(config)

        # define models
        # self.model = resnet50(pretrained=True, num_classes=self.config.train_classes, is_train=True)
        # self.testmodel = resnet50(pretrained=False, num_classes=self.config.train_classes, is_train=False)

        self.model = resnet_50(pretrained=True,
                               num_classes=self.config.train_classes,
                               saliency=None,
                               pool_type="max_avg",
                               is_train=True,
                               scale=self.config.scale)

        self.testmodel = resnet_50(pretrained=False,
                                   num_classes=self.config.train_classes,
                                   saliency=None,
                                   pool_type="max_avg",
                                   is_train=False,
                                   threshold=self.config.threshold,
                                   scale=self.config.scale)

        # define data_loader
        # self.data_loader = Bird(config=config)
        # define data_loader
        if config.data_loader == 'Birds':
            self.data_loader = Bird(config=config)
        elif config.data_loader == 'Cars':
            self.data_loader = Car(config=config)
        elif config.data_loader == 'Sop':
            self.data_loader = Sop(config=config)
        elif config.data_loader == 'Isc':
            self.data_loader = Isc(config=config)
        else:
            raise Exception(
                "Please specify a valid data_loader option in the json config")

        from queue import Queue
        self.q = Queue(maxsize=5)

        # define loss
        self.loss = nn.CrossEntropyLoss()

        self.Center = Center()

        self.triplet = HardMiningLoss()

        # define optimizer
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.config.learning_rate,
                                         momentum=self.config.momentum,
                                         weight_decay=self.config.momentum)  # note: passes momentum as weight_decay; self.config.weight_decay was probably intended

        # define scheduler
        # self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[5,10,25,55,75,95,500,1000],gamma=5e-1)
        self.decay_time = [False, False]
        self.init_lr = self.config.learning_rate
        self.decay_rate = 0.1

        # initialize counter
        self.current_epoch = 1
        self.current_iteration = 0
        self.best_metric = 0

        self.epoch_loss = AverageMeter()
        self.top1 = AverageMeter()
        self.top5 = AverageMeter()

        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        if self.is_cuda and not self.config.cuda:
            self.logger.info(
                "WARNING: You have a CUDA device, so you should probably enable CUDA"
            )

        self.cuda = self.is_cuda & self.config.cuda

        # set the manual seed for torch
        self.manual_seed = self.config.seed
        if self.cuda:
            torch.manual_seed(self.manual_seed)
            torch.cuda.manual_seed(self.manual_seed)
            self.device = torch.device("cuda")
            torch.cuda.set_device(self.config.gpu_device)
            self.model = self.model.to(self.device)
            self.testmodel = self.testmodel.to(self.device)
            self.loss = self.loss.to(self.device)

            self.logger.info("Program will run on *****GPU-CUDA***** ")
            print_cuda_statistics()
        else:
            self.logger.info("Program will run on *****CPU*****\n")

        # Load the model from the latest checkpoint; if none is found, start from scratch.
        self.load_checkpoint(self.config.checkpoint_file)
        # Summary Writer
        self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir,
                                            comment="Agent")