Example 1
    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        self.save_dir = join(opt.checkpoints_dir, opt.name)
        self.optimizer = None
        self.edge_features = None
        self.labels = None
        self.mesh = None
        self.soft_label = None
        self.loss = None

        # number of output classes for the classifier
        self.nclasses = opt.nclasses

        # load/define networks
        self.net = networks.define_classifier(opt.input_nc, opt.ncf, opt.ninput_edges, opt.nclasses, opt,
                                              self.gpu_ids, opt.arch, opt.init_type, opt.init_gain)
        self.net.train(self.is_train)
        self.criterion = networks.define_loss(opt).to(self.device)

        if self.is_train:
            if opt.optim == 'RMSprop':
                self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=opt.lr)
            else:
                self.optimizer = torch.optim.Adam(self.net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.scheduler = networks.get_scheduler(self.optimizer, opt)
            print_network(self.net)

        if not self.is_train or opt.continue_train:
            self.load_network(opt.which_epoch)
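
This constructor (and the two variants that follow) reads everything it needs from a single `opt` options object, builds the network, and only creates the optimizer and scheduler in training mode. As a minimal sketch of how such a constructor could be exercised without the original argument parser, here is a hand-built `opt`; the attribute names come from the snippet above, but the values and the class name `ClassifierModel` are assumptions for illustration only.

from argparse import Namespace

# Hypothetical option object covering the attributes read in Example 1's __init__.
opt = Namespace(
    gpu_ids=[],                       # empty list -> the model falls back to CPU
    is_train=True,
    checkpoints_dir='./checkpoints',
    name='debug_run',
    nclasses=30,
    input_nc=5,
    ncf=[64, 128, 256, 256],
    ninput_edges=750,
    arch='mconvnet',
    init_type='normal',
    init_gain=0.02,
    optim='Adam',                     # anything other than 'RMSprop' selects Adam
    lr=0.001,
    beta1=0.9,
    continue_train=False,
    which_epoch='latest',
)
# model = ClassifierModel(opt)        # assumes the method above lives in a class of this name
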
Example 2
    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        self.device = torch.device('cuda:{}'.format(
            self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.optimizer = None
        self.loss = None

        # load/define networks
        self.net = network.define_prs(self.gpu_ids)
        self.net.train(self.is_train)
        self.criterion1 = network.define_loss_1(opt)
        self.criterion2 = network.define_loss_2(opt)

        if self.is_train:
            self.optimizer = torch.optim.Adam(self.net.parameters(),
                                              lr=opt.lr,
                                              betas=(opt.beta1, 0.999))
            self.scheduler = network.get_scheduler(self.optimizer, opt)
            print_network(self.net)

        if not self.is_train or opt.continue_train:
            self.load_network(opt.which_epoch)
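
Each example hands the freshly created optimizer to a `get_scheduler` helper from its `networks`/`network` module, which the listing does not show. A minimal sketch of the usual shape of such a helper, assuming a learning-rate policy selected via `opt.lr_policy` with linear decay after `opt.niter` epochs; the option names `lr_policy`, `niter`, `niter_decay`, and `lr_decay_iters` are assumptions, not taken from the listing.

import torch

def get_scheduler(optimizer, opt):
    # Assumed helper: return a learning-rate scheduler chosen by opt.lr_policy.
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # Keep the lr constant for opt.niter epochs, then decay it linearly
            # to zero over opt.niter_decay additional epochs.
            return 1.0 - max(0, epoch - opt.niter) / float(opt.niter_decay + 1)
        return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        return torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=opt.lr_decay_iters,
                                               gamma=0.1)
    else:
        raise NotImplementedError('lr policy [%s] is not implemented' % opt.lr_policy)
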
Example 3
    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        self.device = torch.device('cuda:{}'.format(
            self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        self.save_dir = join(opt.checkpoints_dir, opt.name)
        self.optimizer = None
        self.edge_features = None
        self.labels = None
        self.mesh = None
        self.soft_label = None
        self.loss = None
        self.path = None
        self.nclasses = opt.nclasses

        # Adding input features additionally into the fully connected layer
        self.feature_keys = opt.features
        if self.feature_keys:
            self.feature_dictionaries = {
                feature: get_feature_dict(feature)
                for feature in self.feature_keys
            }
        self.feature_values = None
        # Logging results into a file for each testing epoch
        self.save_dir = join(opt.checkpoints_dir, opt.name)
        self.testacc_log = join(self.save_dir, 'testacc_full_log_')
        self.final_testacc_log = join(self.save_dir, 'final_testacc_full_log_')
        # Load/define networks
        self.net = networks.define_classifier(opt.input_nc,
                                              opt.ncf,
                                              opt.ninput_edges,
                                              opt.nclasses,
                                              opt,
                                              self.gpu_ids,
                                              opt.arch,
                                              opt.init_type,
                                              opt.init_gain,
                                              num_features=len(
                                                  self.feature_keys))
        self.net.train(self.is_train)
        self.criterion = networks.define_loss(opt).to(self.device)

        if self.is_train:
            self.optimizer = torch.optim.Adam(self.net.parameters(),
                                              lr=opt.lr,
                                              betas=(opt.beta1, 0.999))
            self.scheduler = networks.get_scheduler(self.optimizer, opt)
            print_network(self.net)

        if not self.is_train or opt.continue_train:
            self.load_network(opt.which_epoch)
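
All three constructors end by calling `self.load_network(opt.which_epoch)` when testing or resuming training, but the method itself lies outside the snippets. A minimal sketch of such a loader, assuming checkpoints are stored as `<which_epoch>_net.pth` inside `self.save_dir` (the filename pattern and the DataParallel unwrapping are assumptions about the surrounding class, not taken from the listing).

import os
import torch

def load_network(self, which_epoch):
    # Assumed checkpoint layout: <save_dir>/<which_epoch>_net.pth
    load_path = os.path.join(self.save_dir, '%s_net.pth' % which_epoch)
    net = self.net
    if isinstance(net, torch.nn.DataParallel):
        net = net.module          # unwrap so the state-dict keys match the bare module
    print('loading the model from %s' % load_path)
    state_dict = torch.load(load_path, map_location=str(self.device))
    net.load_state_dict(state_dict)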