Example 1
import torch
import torch.optim as optim

# Expert and Gating are network classes defined elsewhere in this code
# base; the module names below are assumptions made for this snippet.
from expert import Expert
from gating import Gating


class ExpertEnsemble:  # hypothetical name for the enclosing class

    def __init__(self, num_experts, lr=0, cam_centers=None, gating_capacity=1):

        self.num_experts = num_experts
        self.lr = lr  # learning rate shared by the gating and expert optimizers

        # default to expert centers at the origin if none are given
        if cam_centers is None:
            cam_centers = torch.zeros(num_experts, 3)

        cam_centers = cam_centers.cuda()

        # setup gating network
        self.model_g = Gating(num_experts, gating_capacity)
        self.model_g = self.model_g.cuda()
        self.model_g.train()
        self.optimizer_g = optim.Adam(self.model_g.parameters(), lr=lr)

        # setup expert networks, one (with its own Adam optimizer) per cluster
        self.experts = []
        self.expert_opts = []

        for i in range(num_experts):

            model_e = Expert(cam_centers[i])
            model_e = model_e.cuda()
            model_e.train()
            optimizer_e = optim.Adam(model_e.parameters(), lr=lr)

            self.experts.append(model_e)
            self.expert_opts.append(optimizer_e)
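
A minimal usage sketch for the constructor above, assuming the enclosing class is the hypothetically named ExpertEnsemble and that one 3D camera center per expert is available; all values below are placeholders for illustration:

import torch

num_experts = 10
cam_centers = torch.rand(num_experts, 3)  # one 3D center per expert (placeholder data)

# pass lr explicitly: the constructor's default of 0 would freeze all weights
ensemble = ExpertEnsemble(num_experts,
                          lr=0.0001,
                          cam_centers=cam_centers,
                          gating_capacity=1)

# one Adam optimizer per expert, plus one for the gating network
assert len(ensemble.experts) == len(ensemble.expert_opts) == num_experts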
Example 2
import math

import torch
import torch.optim as optim

from cluster_dataset import ClusterDataset

# Expert is the network class defined elsewhere in this code base; the
# module name below is an assumption made for this snippet. `opt` holds
# the parsed command-line options (clusters, expert, session, ...).
from expert import Expert

trainset = ClusterDataset("training",
                          num_clusters=opt.clusters,
                          cluster=opt.expert)

# default batch size of 1, shuffled each epoch
trainset_loader = torch.utils.data.DataLoader(trainset,
                                              shuffle=True,
                                              num_workers=6)

# placeholder expert center; the real parameters are loaded right below
model = Expert(torch.zeros(3))
model.load_state_dict(
    torch.load('expert_e%d_%s.net' % (opt.expert, opt.session)))

print("Successfully loaded model.")

model.cuda()
model.train()

# output file for the refined network weights
model_file = 'expert_e%d_%s_refined.net' % (opt.expert, opt.session)

optimizer = optim.Adam(model.parameters(), lr=opt.learningrate)
scheduler = optim.lr_scheduler.StepLR(optimizer,
                                      step_size=opt.lrssteps,
                                      gamma=opt.lrsgamma)

iteration = 0
epochs = math.ceil(opt.iterations / len(trainset))

# keep track of training progress; buffering=1 keeps the log line-buffered
train_log = open('log_refine_e%d_%s.txt' % (opt.expert, opt.session), 'w', 1)
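
The snippet prepares everything the refinement loop needs (data loader, optimizer, scheduler, log file) but ends before the loop itself. Below is a minimal sketch of such a loop under two assumptions not present in the original: each dataset item is an (inputs, targets) pair, and compute_loss is a hypothetical placeholder for the actual refinement objective:

for epoch in range(epochs):
    for inputs, targets in trainset_loader:

        inputs = inputs.cuda()
        targets = targets.cuda()

        # hypothetical objective; the real code would compute its own loss here
        loss = compute_loss(model(inputs), targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()  # assumes opt.lrssteps counts iterations, not epochs

        train_log.write('%d %f\n' % (iteration, loss.item()))
        iteration += 1

# store the refined weights under the file name prepared above
torch.save(model.state_dict(), model_file)
train_log.close()

Because epochs is computed as math.ceil(opt.iterations / len(trainset)), running the loop for that many epochs covers at least opt.iterations training iterations with a batch size of 1.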