Example #1
def proposal_data():
    try:
        validate_arguments(['name'], 1)
        proposal = Proposal(request.args.get('name'))
        return jsonify(proposal.get_data())
    except Exception as e:
        return jsonify({'Error': str(e)})
Example #2
def proposal_upvote():
    try:
        validate_arguments(['name'], 1)
        proposal = Proposal(request.args.get('name'))
        proposal.upvote()
        return jsonify(
            {'Status': 'Proposal %s upvoted successfully' % proposal.name})
    except Exception as e:
        return jsonify({'Error': str(e)})
Example #3
def proposal_delete():
    try:
        validate_arguments(['name', 'username', 'fund_name'], 3)
        proposal = Proposal(request.args.get('name'))
        proposal.delete(request.args.get('username'),
                        request.args.get('fund_name'))
        return jsonify(
            {'Status': 'Proposal %s deleted successfully' % proposal.name})
    except Exception as e:
        return jsonify({'Error': str(e)})
Example #4
    def simulate_propose(self, cure_discovered, world_actions_queue):
        '''Simulates the government's decisions for a single day.'''
        ratio = self.infected_total / self.initial_population

        if ((self.infected_total > self.initial_population / 2
             or self.dead_total >= self.initial_population / 4)
                and self.open_fronteir and not cure_discovered):
            action = 0
            for country in self.neighbours:
                action += country.infected_total / country.initial_population
            if len(self.neighbours) != 0 and action != 0:
                action /= len(self.neighbours)
                priority = action * ratio
                proposal_msg = 'close fronteirs'
                proposal_object = Proposal(priority, proposal_msg, self)
                self.add_proposal(proposal_object, world_actions_queue)
                # world_actions_queue.append(proposal_object)

        if ((self.infected_total > 0.8 * self.initial_population
             or self.dead_total > 0.2 * self.initial_population)
                and self.open_airport and not cure_discovered):
            action = 0.8
            priority = action * ratio
            proposal_msg = 'close airports'
            proposal_object = Proposal(priority, proposal_msg, self)
            self.add_proposal(proposal_object, world_actions_queue)
            # world_actions_queue.append(proposal_object)

        if self.infected_total > self.initial_population / 3 and not self.has_mask:
            action = 0.5
            priority = action * ratio
            proposal_msg = 'give masks'
            proposal_object = Proposal(priority, proposal_msg, self)
            self.add_proposal(proposal_object, world_actions_queue)
            # world_actions_queue.append(proposal_object)

        if ((self.infected_total <= self.initial_population / 2
             and self.dead_total < self.initial_population / 4)
                and not self.open_fronteir) or (cure_discovered
                                                and not self.open_fronteir):
            action = 1 if cure_discovered else 0.7
            priority = action * ratio
            proposal_msg = 'open fronteirs'
            proposal_object = Proposal(priority, proposal_msg, self)
            self.add_proposal(proposal_object, world_actions_queue)
            # world_actions_queue.append(proposal_object)

        if ((self.infected_total <= 0.8 * self.initial_population
             and self.dead_total <= 0.2 * self.initial_population)
                and not self.open_airport) or (cure_discovered
                                               and not self.open_airport):
            action = 1 if cure_discovered else 0.7
            priority = action * ratio
            proposal_msg = 'open airports'
            proposal_object = Proposal(priority, proposal_msg, self)
            self.add_proposal(proposal_object, world_actions_queue)
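
The add_proposal helper is not shown; the commented-out append calls hint that proposals end up on a shared world queue. A minimal sketch, assuming proposals are ordered by the priority computed above (the helper body and the priority attribute name are assumptions, not the original code):

import heapq

def add_proposal(self, proposal, world_actions_queue):
    # Hypothetical: negate priority for max-first ordering; id() breaks
    # ties so Proposal objects are never compared directly.
    heapq.heappush(world_actions_queue,
                   (-proposal.priority, id(proposal), proposal))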
Example #5
def proposal_create():
    try:
        validate_arguments(
            ['name', 'ticker', 'shares', 'transaction', 'user', 'fund'], 6)
        proposal = Proposal(request.args.get('name'))
        proposal.create(request.args.get('user'), request.args.get('fund'),
                        request.args.get('ticker'), 0,
                        request.args.get('shares'),
                        request.args.get('transaction'))
        return jsonify(
            {'Status': 'Proposal %s added successfully' % proposal.name})
    except Exception as e:
        return jsonify({'Error': str(e)})
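
All of the proposal endpoints above call a validate_arguments helper that is not shown. A minimal sketch of what it plausibly does, assuming it verifies the query string before the handlers touch it (the exact behavior is an assumption):

from flask import request

def validate_arguments(expected_args, count):
    # Hypothetical reconstruction: require exactly `count` query parameters
    # and every expected name; raising here lets the endpoints' except
    # blocks return the message as JSON.
    if len(request.args) != count:
        raise ValueError('Expected %d arguments, got %d'
                         % (count, len(request.args)))
    for name in expected_args:
        if request.args.get(name) is None:
            raise ValueError('Missing required argument: %s' % name)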
Example #6
    def newReqProposal(self, imsg, index):
        clientId = imsg['clientid'] #int
        clientReqId = imsg['reqid'] #int
        value = imsg['value'] #str

        clientRetIP = imsg.get('retip') #str
        clientRetPort = imsg.get('retport') #int
        
        if not isinstance(clientId, int) or \
            not isinstance(clientReqId, int) or \
            not isinstance(value, (str, int)):
            # request is invalid
            print('REQUEST INVALID', file=sys.stderr)
            return
        
        gReqId = getGlobalReqId(clientId, clientReqId)

        logValue = {'id': gReqId, 'value': value }

        # special case from lookahead, already have promises, directly accept
        currentProposal = self.proposals.get(index)
        if currentProposal is not None and currentProposal.isReuse():
            currentProposal.setValue(logValue)
            currentProposal.setOrigValue(logValue)
            currentProposal.setReturnInfo((clientRetIP, clientRetPort))
            self._sendAccept(index)
            return
        elif currentProposal is not None:
            print('Error: a proposal already exists for index %s' % index,
                  file=sys.stderr)
            return

        self.propNums[index] = self.basePropNum
        self.proposals[index] = Proposal(index, self.propNums[index], logValue, self.majority, clientRetIP, clientRetPort)
        self._sendPrepare(index)
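
getGlobalReqId is defined elsewhere; the proposer above only needs it to map a (clientId, clientReqId) pair to a unique log identifier. A minimal sketch under that assumption:

def getGlobalReqId(clientId, clientReqId):
    # Hypothetical: any injective encoding of the pair works, since the
    # result is only used to identify the request inside the log value.
    return (clientId, clientReqId)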
Example #7
 def __init__(self, data=None, proposals=None, next_payload=Type.no_next_payload,
              critical=False):
     super(SA, self).__init__(data, next_payload, critical)
     self._type = 33
     if data is not None:
         self.parse(self._data)
     elif proposals is None:
         self.proposals = [
             #Proposal(None, 1, const.ProtocolID.IKE, transforms=[
                 #('ENCR_CAMELLIA_CBC', 256),
                 #('AUTH_HMAC_SHA2_256_128',),
                 #('PRF_HMAC_SHA2_256',),
                 #('DH_GROUP_14',)
             #]),
             Proposal(None, 1, const.ProtocolID.IKE, transforms=[
                 ('ENCR_AES_CBC', 128),
                 ('AUTH_HMAC_SHA2_256_128',),
                 ('PRF_HMAC_SHA2_256',),
                 ('DH_GROUP_14',)
             ])
         ]
         self.spi = self.proposals[0].spi
     else:
         self.proposals = proposals
         self.spi = self.proposals[0].spi
Example #8
 def parse(self, data):
     self.proposals = list()
     last = False
     self.spi = None
     while not last:
         proposal = Proposal(data)
         if proposal.spi:
             logger.debug("Setting SPI to: {}".format(proposal.spi))
             self.spi = proposal.spi
         self.proposals.append(proposal)
         last = proposal.last
         data = data[proposal.len:]
     logger.debug("got {} proposals".format(len(self.proposals)))
Example #9
    def __init__(self, cnn_net, num_class, batch_size=1, is_training=True):
        self._scope = 'vgg_16'
        if not is_training:
            self.reuse = tf.AUTO_REUSE
        else:
            self.reuse = None

        with tf.variable_scope(self._scope, self._scope, reuse=self.reuse):
            self.image = tf.placeholder(tf.float32, [1, None, None, 3])
            self.gt_boxes = tf.placeholder(tf.float32, [None, 5])
            self.im_info = tf.placeholder(tf.float32, [3])

        self.cnn_net = cnn_net
        self.batch_size = batch_size
        self.num_class = num_class
        self.is_training = is_training

        self._feat_stride = 16

        self.anchor_ratio = [0.5, 1, 2]
        self.base_anchors = [8, 16, 32]
        self.num_anchors = len(self.anchor_ratio) * len(self.base_anchors)

        if is_training:
            self.initializer = tf.truncated_normal_initializer(mean=0.0,
                                                               stddev=0.01)
            self.initializer_bbox = tf.truncated_normal_initializer(
                mean=0.0, stddev=0.001)
        else:
            self.initializer = tf.random_normal_initializer(mean=0.0,
                                                            stddev=0.01)
            self.initializer_bbox = tf.random_normal_initializer(mean=0.0,
                                                                 stddev=0.001)

        self.rpn = RPN(self.num_class, is_training, self.initializer,
                       self.batch_size)
        self.proposal = Proposal(self.num_class, is_training, self.initializer,
                                 self.batch_size)
Example #10
 def __init__(self,
              data=None,
              proposals=None,
              next_payload=Type.no_next_payload,
              critical=False):
     super(SA, self).__init__(data, next_payload, critical)
     self._type = 1
     if data is not None:
         self.parse(self._data)
     elif proposals is None:
         self.proposals = [
             Proposal(None,
                      1,
                      const.ProtocolID.IKE,
                      transforms=[(
                          ('SM1', 'SM3', 'CERT'),
                          1,
                      ), (('SM1', 'SHA', 'CERT'), 1, 2)])
         ]
         self.spi = self.proposals[0].spi
     else:
         self.proposals = proposals
         self.spi = self.proposals[0].spi
Example #11
def nextProposal():
    request_content = request.get_json()

    print("Request: \n", json.dumps(request_content, indent=2))

    customer_grade = int(request_content['grade'])

    fixedLimits = limits.calculateFixedLimits(customer_grade,
                                              request_content['annual_inc'])

    print("Limits:")
    print("\tInterest Rate: {} - {}".format(fixedLimits['int_rate']['min'],
                                            fixedLimits['int_rate']['max']))
    print("\tAmount: {} - {}\n".format(fixedLimits['loan_amnt']['min'],
                                       fixedLimits['loan_amnt']['max']))

    result = Proposal.calculateNextProposal(
        predict, limits, customer_grade, request_content['annual_inc'],
        request_content['installments'], request_content['loan_amnt'],
        request_content['int_rate'], request_content['round'], fixedLimits)

    return jsonify(int_rate=result["int_rate"],
                   loan_amnt=result["loan_amnt"],
                   installments=result["installments"])
Example #12
class CycleDRPANModel(BaseModel):
    def name(self):
        return 'CycleDRPANModel'

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        # default CycleGAN did not use dropout
        parser.set_defaults(no_dropout=True)
        if is_train:
            parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
            parser.add_argument('--lambda_B', type=float, default=10.0,
                                help='weight for cycle loss (B -> A -> B)')
            parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')

        return parser

    def initialize(self, opt):
        BaseModel.initialize(self, opt)

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B', 'R_A', 'GR_A']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        if self.isTrain:
            visual_names_A = ['real_A', 'fake_B', 'rec_A', 'fake_Br', 'real_Ar', 'fake_Bf', 'real_Af']
            visual_names_B = ['real_B', 'fake_A', 'rec_B', 'fake_Ar', 'real_Br', 'fake_Af', 'real_Bf']

        else:
            visual_names_A = ['real_A', 'fake_B', 'rec_A']
            visual_names_B = ['real_B', 'fake_A', 'rec_B']

        if self.isTrain and self.opt.lambda_identity > 0.0:
            visual_names_A.append('idt_A')
            visual_names_B.append('idt_B')

        self.visual_names = visual_names_A + visual_names_B
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', 'R_A', 'R_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # load/define networks
        # The naming convention is different from that used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X), R_A(R_Y), R_B(R_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netR_A = networks.define_R(opt.input_nc, opt.output_nc, opt.ndf, opt.n_layers_D,
                                            opt.norm, use_sigmoid,
                                            opt.init_type, opt.init_gain, self.gpu_ids)
            self.netR_B = networks.define_R(opt.input_nc, opt.output_nc, opt.ndf, opt.n_layers_D,
                                            opt.norm, use_sigmoid,
                                            opt.init_type, opt.init_gain, self.gpu_ids)


        if self.isTrain:
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_R_A = torch.optim.Adam(self.netR_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_R_B = torch.optim.Adam(self.netR_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            self.optimizers.append(self.optimizer_R_A)
            self.optimizers.append(self.optimizer_R_B)

            self.proposal = Proposal()

            # self.batchsize = opt.batchSize
            # self.label_r = torch.FloatTensor(self.batchsize)

    def set_input(self, input):
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        self.fake_B = self.netG_A(self.real_A)
        self.rec_A = self.netG_B(self.fake_B)

        self.fake_A = self.netG_B(self.real_B)
        self.rec_B = self.netG_A(self.fake_A)

    def backward_D_basic(self, netD, real, fake):
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        # backward
        loss_D.backward()
        return loss_D

    def backward_D_A(self):
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def reviser_A(self):
        # training with reviser
        for n_step in range(3):
            fake_B_ = self.netG_A(self.real_A)
            output = self.netD_A(fake_B_.detach())

            # proposal
            self.fake_Br, self.real_Ar, self.fake_Bf, self.real_Af, self.fake_ABf, self.real_ABr = self.proposal.forward_A(self.real_B, fake_B_, self.real_A, output)
            # train with real
            self.netD_A.zero_grad()
            output_r = self.netR_A(self.real_ABr.detach())
            self.loss_errR_real_A = self.criterionGAN(output_r, True)
            self.loss_errR_real_A.backward()

            # train with fake
            output_r = self.netR_A(self.fake_ABf.detach())
            self.loss_errR_fake_A = self.criterionGAN(output_r, False)
            self.loss_errR_fake_A.backward()

            self.loss_R_A = (self.loss_errR_real_A + self.loss_errR_fake_A) / 2
            self.optimizer_R_A.step()

            # train Generator with reviser
            self.netG_A.zero_grad()
            output_r = self.netR_A(self.fake_ABf)
            self.loss_GR_A = self.criterionGAN(output_r, True)
            self.loss_GR_A.backward()
            self.optimizer_G.step()

    def reviser_B(self):
        # training with reviser
        for n_step in range(3):
            fake_A_ = self.netG_B(self.real_B)
            output = self.netD_B(fake_A_.detach())

            # proposal
            self.fake_Ar, self.real_Br, self.fake_Af, self.real_Bf, self.fake_BAf, self.real_BAr = self.proposal.forward_B(self.real_A, fake_A_, self.real_B, output)
            # train with real
            self.netD_B.zero_grad()
            output_r = self.netR_B(self.real_BAr.detach())
            self.loss_errR_real_B = self.criterionGAN(output_r, True)
            self.loss_errR_real_B.backward()

            # train with fake
            output_r = self.netR_B(self.fake_BAf.detach())
            self.loss_errR_fake_B = self.criterionGAN(output_r, False)
            self.loss_errR_fake_B.backward()

            self.loss_R_B = (self.loss_errR_real_B + self.loss_errR_fake_B) / 2
            self.optimizer_R_B.step()

            # train Generator with reviser
            self.netG_B.zero_grad()
            output_r = self.netR_B(self.fake_BAf)
            self.errGAN_r = self.criterionGAN(output_r, True)
            self.loss_GR_B = self.errGAN_r
            self.loss_GR_B.backward()
            self.optimizer_G.step()


    def backward_G(self):
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed.
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed.
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss
        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
        # Backward cycle loss
        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
        # combined loss
        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
        self.loss_G.backward()

    def optimize_parameters(self):
        # forward
        self.forward()
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()
        self.backward_D_A()
        self.backward_D_B()
        self.optimizer_D.step()
        # R_A and R_B
        self.set_requires_grad([self.netR_A, self.netR_B], True)
        self.optimizer_R_A.zero_grad()
        self.optimizer_R_B.zero_grad()
        self.reviser_A()
        self.reviser_B()
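
A typical driver for the model above follows the standard CycleGAN training script; the option and dataset wiring here is an assumption:

model = CycleDRPANModel()
model.initialize(opt)               # opt from the repo's option parser
for epoch in range(num_epochs):
    for data in dataset:            # dicts with 'A'/'B' tensors and paths
        model.set_input(data)
        model.optimize_parameters()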
Example #13
    def initialize(self, opt):
        BaseModel.initialize(self, opt)

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B', 'R_A', 'GR_A']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        if self.isTrain:
            visual_names_A = ['real_A', 'fake_B', 'rec_A', 'fake_Br', 'real_Ar', 'fake_Bf', 'real_Af']
            visual_names_B = ['real_B', 'fake_A', 'rec_B', 'fake_Ar', 'real_Br', 'fake_Af', 'real_Bf']

        else:
            visual_names_A = ['real_A', 'fake_B', 'rec_A']
            visual_names_B = ['real_B', 'fake_A', 'rec_B']

        if self.isTrain and self.opt.lambda_identity > 0.0:
            visual_names_A.append('idt_A')
            visual_names_B.append('idt_B')

        self.visual_names = visual_names_A + visual_names_B
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', 'R_A', 'R_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # load/define networks
        # The naming convention is different from that used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X), R_A(R_Y), R_B(R_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netR_A = networks.define_R(opt.input_nc, opt.output_nc, opt.ndf, opt.n_layers_D,
                                            opt.norm, use_sigmoid,
                                            opt.init_type, opt.init_gain, self.gpu_ids)
            self.netR_B = networks.define_R(opt.input_nc, opt.output_nc, opt.ndf, opt.n_layers_D,
                                            opt.norm, use_sigmoid,
                                            opt.init_type, opt.init_gain, self.gpu_ids)


        if self.isTrain:
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_R_A = torch.optim.Adam(self.netR_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_R_B = torch.optim.Adam(self.netR_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            self.optimizers.append(self.optimizer_R_A)
            self.optimizers.append(self.optimizer_R_B)

            self.proposal = Proposal()
Example #14
def build_shared_model(params, config):
    if config.train_obj == "vi" and config.analytic_kl and \
            config.trans_mix_num_components != 1:
        raise ValueError("Analytic KL cannot be applied to mixtures.")

    gnn_attention = config.gnn_attention
    if gnn_attention is None:
        gnn_attention = ATTENTION_UNIFORM if config.mini_batch \
            else ATTENTION_SOFTMAX
    elif gnn_attention == ATTENTION_SOFTMAX and config.mini_batch:
        raise ValueError("Bad configuration.")

    assert not (config.embed_node_attr and params["dim_node_attrs"] <= 0)
    assert not (config.embed_edge_attr and params["dim_edge_attrs"] <= 0)
    assert not (config.learn_node_embed and config.const_num_nodes is None)
    dim_node_attr = (params["dim_node_embed"] if config.embed_node_attr else
                     params["dim_node_attrs"]) + (params["dim_node_embed"]
                                                  if config.learn_node_embed
                                                  else 0)
    dim_edge_attr = params["dim_edge_embed"] \
        if config.embed_edge_attr else params["dim_edge_attrs"]

    gnn_config = GNNConfig(num_heads=params["gnn_num_heads"],
                           dim_input=None,
                           dim_key=params["gnn_dim_key"],
                           dim_value=params["gnn_dim_value"],
                           dim_node_attr=dim_node_attr,
                           dim_edge_attr=dim_edge_attr,
                           impl=config.gnn_impl,
                           attention=gnn_attention,
                           messenger=config.gnn_messenger,
                           activation=config.gnn_activation,
                           layer_norm_in=params["gnn_layer_norm_in"],
                           layer_norm_out=params["gnn_layer_norm_out"],
                           skip_conn=False,
                           num_layers=1,
                           combiner=params["gnn_combiner"],
                           recurrent=params["gnn_recurrent"],
                           rnn_num_layers=params["rnn_num_layers"],
                           readout=READOUT_MEAN_MAX,
                           parallel_iterations=params["parallel_iterations"],
                           swap_memory=params["swap_memory"])

    trans_gnn_config = gnn_config.clone()
    trans_gnn_config.num_layers = params["trans_gnn_num_layers"]

    gen_model_params = dict(
        dim_hidden=params["dim_hidden"],
        dim_observ=params["dim_observs"],
        dim_latent=params["dim_latent"],
        dim_mlp=params["dim_mlp"],
        dim_global_input=params["dim_time_attrs"],
        const_num_nodes=params["const_num_nodes"],
        gnn_config=trans_gnn_config.clone(),
        rnn_num_layers=params["rnn_num_layers"],
        init_mix_num_components=params["init_mix_num_components"],
        trans_mix_num_components=params["trans_mix_num_components"],
        trans_mlp_num_layers=params["trans_mlp_num_layers"],
        trans_activation=params["trans_activation"],
        trans_layer_norm=params["trans_layer_norm"],
        trans_scale_activation=params["trans_scale_activation"],
        trans_scale_shift=params["trans_scale_shift"],
        trans_scale_identical=params["trans_scale_identical"],
        trans_skip_conn=params["trans_skip_conn"],
        trans_ar=params["trans_ar"],
        trans_global_low_rank=params["trans_global_low_rank"],
        trans_local_low_rank=params["trans_local_low_rank"],
        trans_global_flow=config.global_flow,
        trans_flow_num_layers=params["trans_flow_num_layers"],
        trans_flow_mv_factor=params["flow_mv_factor"],
        trans_flow_skip_conn=config.flow_skip_conn,
        emit_low_rank=params["emit_low_rank"],
        emit_mix_num_components=params["emit_mix_num_components"],
        emit_mlp_num_layers=params["emit_mlp_num_layers"],
        emit_activation=params["emit_activation"],
        emit_scale_activation=params["emit_scale_activation"],
        emit_scale_shift=params["emit_scale_shift"],
        emit_scale_identical=params["emit_scale_identical"],
        emit_neg_binomial=params["emit_neg_binomial"],
        emit_loc_scale_type=params["emit_loc_scale_type"],
        emit_non_markov=params["emit_non_markov"],
        emit_identity=params["emit_identity"])
    if config.markov:
        gen_model = MarkovModel(**gen_model_params)
    else:
        gen_model = NonMarkovModel(**gen_model_params)

    perturb_noise_scale = tf.train.linear_cosine_decay(
        config.noise_scale,
        tf.train.get_or_create_global_step(),
        config.noise_scale_decay_steps or config.num_steps,
        alpha=0.0,
        beta=config.noise_scale_min_ratio)
    tb.summary.scalar("perturb_noise_scale", perturb_noise_scale)

    proposal_gnn_config = gnn_config.clone()
    proposal_gnn_config.num_layers = params["proposal_gnn_num_layers"]

    proposal_params = dict(model=gen_model,
                           dim_mlp=params["dim_mlp"],
                           mlp_num_layers=params["trans_mlp_num_layers"],
                           rnn_num_layers=params["rnn_num_layers"],
                           global_flow=config.global_flow,
                           flow_num_layers=params["proposal_flow_num_layers"],
                           flow_mv_factor=params["flow_mv_factor"],
                           flow_skip_conn=config.flow_skip_conn,
                           global_low_rank=params["trans_global_low_rank"],
                           local_low_rank=params["trans_local_low_rank"],
                           loc_activation=params["proposal_loc_activation"],
                           loc_layer_norm=params["proposal_loc_layer_norm"],
                           scale_activation=params["trans_scale_activation"],
                           scale_shift=params["trans_scale_shift"],
                           scale_identical=config.proposal_scale_identical,
                           gnn_config=proposal_gnn_config.clone(),
                           use_belief=config.use_belief,
                           use_lookahead=config.use_lookahead,
                           use_skip_conn=config.use_skip_conn,
                           use_gated_adder=config.use_gated_adder,
                           reuse_gen_flow=config.reuse_gen_flow,
                           denoising=config.denoising,
                           noise_scale=perturb_noise_scale)

    if config.proposal == "indep":
        proposal_model = IndepProposal(**proposal_params,
                                       summarize_unit=SUMMARIZER_LSTM)
    elif config.proposal == "joint" and not config.mini_batch:
        proposal_model = Proposal(**proposal_params,
                                  summarize_unit=SUMMARIZER_RGNN)
    else:
        raise ValueError("Unknown/Invalid proposal type.")

    proposal_model = FactorizedWrapper(proposal_model)

    interleaving_rate = tf.train.polynomial_decay(
        config.interleaving_rate,
        tf.train.get_or_create_global_step(),
        config.interleaving_decay_steps or (config.num_steps // 2),
        (config.interleaving_rate_min_ratio * config.interleaving_rate),
        power=1.0)
    tb.summary.scalar("interleaving_rate", interleaving_rate)

    interleaving_scheduler = InterleavingScheduler(
        prefix_length=(config.prefix_length if config.prefixing else 0),
        rate=interleaving_rate,
        randomly=config.interleaving_randomly,
        refresh_last_step=False)
    if not config.interleaving:
        assert not config.prefixing
        interleaving_scheduler = None

    hamiltonian_is = None
    if config.his:
        hamiltonian_is = LearnableHIS(
            global_num_dims=params["dim_latent"],
            local_num_dims=params["dim_latent"],
            num_steps=params["his_num_leapfrog_steps"],
            max_step_size=params["his_max_step_size"],
            mass_scale=params["his_mass_scale"])

    if config.aux_task is not None and not config.use_lookahead:
        tf.logging.warning(
            "Auxiliary loss is optimized without using lookahead information.")
    aux_params = dict(
        dim_latent=params["dim_latent"],
        dim_summary=params["dim_hidden"],
    )
    if config.aux_task == AUX_ADJ:
        aux_model = EdgeClassifier(**aux_params)
    elif config.aux_task == AUX_CPC:
        aux_model = CPC(**aux_params)
    elif config.aux_task == AUX_DGI:
        aux_model = DGI(**aux_params)
    elif config.aux_task == AUX_ZF:
        aux_model = ZForcing(**aux_params,
                             dim_mlp=params["dim_mlp"],
                             mlp_num_layers=1,
                             num_future_steps=params["aux_zf_num_steps"])
    elif config.aux_task == AUX_MASK:
        aux_model = MaskedGNNDecoder(
            **aux_params,
            gnn_config=gnn_config.clone(),
            dim_mlp=params["dim_mlp"],
            num_masked_nodes=params["aux_mask_num_nodes"],
            all_at_once=params["aux_mask_all_at_once"])
    elif config.aux_task == AUX_MIX:
        aux_model = MIX(**aux_params,
                        dim_observ=params["dim_observs"],
                        gnn_config=gnn_config.clone(),
                        dim_mlp=params["dim_mlp"],
                        mlp_num_layers=2,
                        cpc_scale=params["aux_cpc_scale"],
                        cpc_state=params["aux_cpc_state"],
                        dgi_scale=params["aux_dgi_scale"],
                        zf_scale=params["aux_zf_scale"],
                        zf_num_future_steps=params["aux_zf_num_steps"],
                        mask_scale=params["aux_mask_scale"],
                        mask_num_nodes=params["aux_mask_num_nodes"],
                        mask_all_at_once=params["aux_mask_all_at_once"])
    elif config.aux_task is not None:
        raise ValueError("Unknown auxiliary task: " + config.aux_task)
    else:
        aux_model = None

    aux_weight = tf.train.polynomial_decay(
        config.aux_weight,
        tf.train.get_or_create_global_step(),
        config.aux_weight_decay_steps or config.num_steps,
        (config.aux_weight_min_ratio * config.aux_weight),
        power=1.0)
    tb.summary.scalar("aux_weight", aux_weight)

    return Model(gen_model=gen_model,
                 proposal_model=proposal_model,
                 aux_model=aux_model,
                 hamiltonian_is=hamiltonian_is,
                 interleaving_scheduler=interleaving_scheduler,
                 aux_weight=aux_weight)
Example #15
    def initialize(self, opt):
        BaseModel.initialize(self, opt)

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = [
            'D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B',
            'R_A', 'GR_A'
        ]
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        if self.isTrain:
            visual_names_A = [
                'real_A', 'fake_B', 'rec_A', 'fake_Br', 'real_Ar', 'fake_Bf',
                'real_Af'
            ]
            visual_names_B = [
                'real_B', 'fake_A', 'rec_B', 'fake_Ar', 'real_Br', 'fake_Af',
                'real_Bf'
            ]

        else:
            visual_names_A = ['real_A', 'fake_B', 'rec_A']
            visual_names_B = ['real_B', 'fake_A', 'rec_B']

        if self.isTrain and self.opt.lambda_identity > 0.0:
            visual_names_A.append('idt_A')
            visual_names_B.append('idt_B')

        self.visual_names = visual_names_A + visual_names_B
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', 'R_A', 'R_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # load/define networks
        # The naming convention is different from that used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X), R_A(R_Y), R_B(R_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)

        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)
            self.netR_A = networks.define_R(opt.input_nc, opt.output_nc,
                                            opt.ndf, opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)
            self.netR_B = networks.define_R(opt.input_nc, opt.output_nc,
                                            opt.ndf, opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)

        if self.isTrain:
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(
                self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_R_A = torch.optim.Adam(self.netR_A.parameters(),
                                                  lr=opt.lr,
                                                  betas=(opt.beta1, 0.999))
            self.optimizer_R_B = torch.optim.Adam(self.netR_B.parameters(),
                                                  lr=opt.lr,
                                                  betas=(opt.beta1, 0.999))
            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            self.optimizers.append(self.optimizer_R_A)
            self.optimizers.append(self.optimizer_R_B)

            self.proposal = Proposal()
Example #16
class FasterRCNN:
    def __init__(self, cnn_net, num_class, batch_size=1, is_training=True):
        self._scope = 'vgg_16'
        if not is_training:
            self.reuse = tf.AUTO_REUSE
        else:
            self.reuse = None

        with tf.variable_scope(self._scope, self._scope, reuse=self.reuse):
            self.image = tf.placeholder(tf.float32, [1, None, None, 3])
            self.gt_boxes = tf.placeholder(tf.float32, [None, 5])
            self.im_info = tf.placeholder(tf.float32, [3])

        self.cnn_net = cnn_net
        self.batch_size = batch_size
        self.num_class = num_class
        self.is_training = is_training

        self._feat_stride = 16

        self.anchor_ratio = [0.5, 1, 2]
        self.base_anchors = [8, 16, 32]
        self.num_anchors = len(self.anchor_ratio) * len(self.base_anchors)

        if is_training:
            self.initializer = tf.truncated_normal_initializer(mean=0.0,
                                                               stddev=0.01)
            self.initializer_bbox = tf.truncated_normal_initializer(
                mean=0.0, stddev=0.001)
        else:
            self.initializer = tf.random_normal_initializer(mean=0.0,
                                                            stddev=0.01)
            self.initializer_bbox = tf.random_normal_initializer(mean=0.0,
                                                                 stddev=0.001)

        self.rpn = RPN(self.num_class, is_training, self.initializer,
                       self.batch_size)
        self.proposal = Proposal(self.num_class, is_training, self.initializer,
                                 self.batch_size)

    def build(self, mode):
        weights_regularizer = tf.contrib.layers.l2_regularizer(WEIGHT_DECAY)
        biases_regularizer = tf.no_regularizer

        with arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer,
                    biases_initializer=tf.constant_initializer(0.0)):

            self.cnn_net.build(self.image,
                               is_training=self.is_training,
                               mode=mode)

            self.feature_input = self.cnn_net.get_output()

            with tf.variable_scope(self._scope, self._scope, reuse=self.reuse):
                rois = self.build_proposal()
                pool5 = self._crop_pool_layer(self.feature_input, rois, "crop")

                self.build_tail(pool5)

            if mode == 'train' or mode == 'val':
                self.build_loss()
                self.lr, self.train_op = self.build_train_op()

    def build_proposal(self):
        self.anchor_list = get_anchors(self.feature_input, self.im_info,
                                       self.anchor_ratio, self.base_anchors)
        self.rpn_layer()
        return self.proposal_layer()

    def rpn_layer(self):
        self.rpn_cls, self.rpn_bbox = self.rpn.build(self.feature_input,
                                                     self.gt_boxes,
                                                     self.im_info,
                                                     self.num_anchors,
                                                     self.anchor_list)

    def proposal_layer(self):
        self.proposal.build(self.rpn_cls, self.rpn_bbox, self.gt_boxes,
                            self.im_info, self.num_anchors, self.anchor_list)
        return self.proposal.rois

    def build_tail(self, rois):
        flatten_rois = slim.flatten(rois, scope='flatten')
        fc5 = slim.fully_connected(flatten_rois, 4096, scope="fc6")
        if self.is_training:
            fc5 = slim.dropout(fc5,
                               keep_prob=0.5,
                               is_training=True,
                               scope='dropout6')

        fc6 = slim.fully_connected(fc5, 4096, scope="fc7")
        if self.is_training:
            fc6 = slim.dropout(fc6,
                               keep_prob=0.5,
                               is_training=True,
                               scope='dropout7')

        self.cls_logit = slim.fully_connected(
            fc6,
            self.num_class,
            weights_initializer=self.initializer,
            trainable=self.is_training,
            activation_fn=None,
            scope='cls_logit')
        self.cls_prob = tf.nn.softmax(self.cls_logit)
        self.cls_pred = tf.argmax(self.cls_prob, axis=1, name="cls_pred")

        self.bbox_logit = slim.fully_connected(
            fc6,
            self.num_class * 4,
            weights_initializer=self.initializer_bbox,
            trainable=self.is_training,
            activation_fn=None,
            scope='bbox_logit')
        self.bbox_delta_pred = self.bbox_logit

    def _crop_pool_layer(self, bottom, rois, name):
        batch_ids = tf.squeeze(
            tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
        # Get the normalized coordinates of bounding boxes
        bottom_shape = tf.shape(bottom)
        height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(
            self._feat_stride)
        width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(
            self._feat_stride)
        x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
        y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
        x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
        y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
        # Won't be back-propagated to rois anyway, but to save time
        bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
        pre_pool_size = POOLING_SIZE * 2
        crops = tf.image.crop_and_resize(bottom,
                                         bboxes,
                                         tf.to_int32(batch_ids),
                                         [pre_pool_size, pre_pool_size],
                                         name="crops")

        return slim.max_pool2d(crops, [2, 2], padding='SAME')

    def build_loss(self):
        rpn_cls_loss = self.get_cls_loss(self.rpn.cls_logit,
                                         self.rpn.cls_label)
        self.rpn_cross_entropy = rpn_cls_loss

        rpn_bbox_loss = self._smooth_l1_loss(self.rpn.bbox_logit,
                                             self.rpn.bbox_target,
                                             self.rpn.bbox_target_in_weight,
                                             self.rpn.bbox_target_out_weight,
                                             sigma=3.0,
                                             dim=[1, 2, 3])

        self.rpn_loss_box = rpn_bbox_loss

        # RCNN, class loss
        cls_label = tf.to_int32(self.proposal.cls_label, name="to_int32")
        cls_label = tf.reshape(cls_label, [-1])
        print("loss:", cls_label.shape)
        cls_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.cls_logit, labels=cls_label))  #[-1, 21], [-1,1]
        self.cross_entropy = cls_loss

        bbox_loss = self._smooth_l1_loss(self.bbox_logit,
                                         self.proposal.bbox_target,
                                         self.proposal.bbox_target_in_weight,
                                         self.proposal.bbox_target_out_weight)

        self.loss_box = bbox_loss

        loss = rpn_cls_loss + rpn_bbox_loss + cls_loss + bbox_loss
        regularization_loss = tf.add_n(tf.losses.get_regularization_losses(),
                                       'regu')
        self.loss = loss + regularization_loss
        #return self.loss

    def _smooth_l1_loss(self,
                        bbox_pred,
                        bbox_targets,
                        bbox_inside_weights,
                        bbox_outside_weights,
                        sigma=1.0,
                        dim=[1]):
        sigma_2 = sigma**2
        box_diff = bbox_pred - bbox_targets
        in_box_diff = bbox_inside_weights * box_diff
        abs_in_box_diff = tf.abs(in_box_diff)
        smoothL1_sign = tf.stop_gradient(
            tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
        in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
                      + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
        out_loss_box = bbox_outside_weights * in_loss_box
        loss_box = tf.reduce_mean(tf.reduce_sum(out_loss_box, axis=dim))
        return loss_box

    def build_train_op(self):
        lr = tf.Variable(0.01, trainable=False)
        i_global_op = tf.train.get_or_create_global_step()
        self.global_op = i_global_op
        self.optimizer = tf.train.MomentumOptimizer(lr, MOMENTUM)

        # Compute the gradients with regard to the loss
        gvs = self.optimizer.compute_gradients(self.loss)
        # Double the gradient of the bias if set
        if DOUBLE_BIAS:
            final_gvs = []
            with tf.variable_scope('Gradient_Mult') as scope:
                for grad, var in gvs:
                    scale = 1.
                    if '/biases:' in var.name:
                        scale *= 2.
                    if not np.allclose(scale, 1.0):
                        grad = tf.multiply(grad, scale)
                    final_gvs.append((grad, var))
            train_op = self.optimizer.apply_gradients(final_gvs,
                                                      global_step=i_global_op)

            for grad, var in final_gvs:
                if grad is not None:
                    tf.summary.histogram(var.op.name + '/gradients', grad)
        else:
            train_op = self.optimizer.apply_gradients(gvs,
                                                      global_step=i_global_op)
            for grad, var in gvs:
                if grad is not None:
                    tf.summary.histogram(var.op.name + '/gradients', grad)

        self.summary_op = tf.summary.merge_all()
        return lr, train_op

    def get_cls_loss(self, predict, target):
        '''
        predict shape: [1, width, height, 9*2]
        '''
        rpn_cls_score = tf.reshape(predict, [-1, 2])
        rpn_label = tf.reshape(target, [-1])

        rpn_select = tf.where(tf.not_equal(rpn_label, -1))

        rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select),
                                   [-1, 2])
        rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])

        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=rpn_cls_score, labels=rpn_label))

    def train_step(self, sess, image, gt_boxes, im_info):
        loss, lr, global_step, _, summary_str = sess.run(
            [
                self.loss, self.lr, self.global_op, self.train_op,
                self.summary_op
            ],
            feed_dict={
                self.image: image,
                self.gt_boxes: gt_boxes,
                self.im_info: im_info.reshape(-1)
            })
        import math
        assert not math.isnan(loss)

        return loss, lr, global_step, summary_str

    def get_loss(self, sess, image, gt_boxes, im_info):
        loss = sess.run(self.loss,
                        feed_dict={
                            self.image: image,
                            self.gt_boxes: gt_boxes,
                            self.im_info: im_info.reshape(-1)
                        })
        import math
        assert not math.isnan(loss)

        return loss

    def predict(self, sess, image, im_info):
        #score and delta bbox
        score, delta_bbox, rois = sess.run(
            [self.cls_prob, self.bbox_delta_pred, self.proposal.rois],
            feed_dict={
                self.image: image,
                self.im_info: im_info.reshape(-1)
            })

        bbox = self.proposal.bbox_target_inv(rois, delta_bbox, im_info)

        assert score.shape[1] == self.num_class
        image_score_list = []
        image_bbox_list = []
        thresh = 0
        for i in range(self.num_class):
            inds = np.where(score[:, i] > thresh)[0]
            image_score = score[inds, i]
            image_bbox = bbox[inds, i * 4:(i + 1) * 4]
            image_score_list.append(image_score)
            image_bbox_list.append(image_bbox)

        image_scores = np.hstack(
            [image_score_list[i] for i in range(1, self.num_class)])
        # threshold at the third-highest score over all foreground classes
        image_thresh = np.sort(image_scores)[-3]
        print("thresh:", image_thresh)
        res_score = []
        res_bbox = []
        for i in range(1, self.num_class):
            #print ("class i:",i,image_score_list[i])
            keep = image_score_list[i] >= image_thresh

            image_score = image_score_list[i][keep]
            image_bbox = image_bbox_list[i][keep]
            res_score.append(image_score)
            res_bbox.append(image_bbox)
            #print ("get sore:",keep, i,image_score_list[i][keep].shape)
        return res_score, res_bbox

    def assign_lr(self, sess, rate):
        sess.run(tf.assign(self.lr, rate))
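
A minimal session sketch for the class above (the backbone object and the feed arrays are assumptions):

net = FasterRCNN(cnn_net=backbone, num_class=21)  # backbone exposes build()/get_output()
net.build(mode='train')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss, lr, step, summary = net.train_step(sess, image, gt_boxes, im_info)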
Example #17
class CycleDRPANModel(BaseModel):
    def name(self):
        return 'CycleDRPANModel'

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        # default CycleGAN did not use dropout
        parser.set_defaults(no_dropout=True)
        if is_train:
            parser.add_argument('--lambda_A',
                                type=float,
                                default=10.0,
                                help='weight for cycle loss (A -> B -> A)')
            parser.add_argument('--lambda_B',
                                type=float,
                                default=10.0,
                                help='weight for cycle loss (B -> A -> B)')
            parser.add_argument(
                '--lambda_identity',
                type=float,
                default=0.5,
                help=
                'use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1'
            )

        return parser

    def initialize(self, opt):
        BaseModel.initialize(self, opt)

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = [
            'D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B',
            'R_A', 'GR_A'
        ]
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        if self.isTrain:
            visual_names_A = [
                'real_A', 'fake_B', 'rec_A', 'fake_Br', 'real_Ar', 'fake_Bf',
                'real_Af'
            ]
            visual_names_B = [
                'real_B', 'fake_A', 'rec_B', 'fake_Ar', 'real_Br', 'fake_Af',
                'real_Bf'
            ]

        else:
            visual_names_A = ['real_A', 'fake_B', 'rec_A']
            visual_names_B = ['real_B', 'fake_A', 'rec_B']

        if self.isTrain and self.opt.lambda_identity > 0.0:
            visual_names_A.append('idt_A')
            visual_names_B.append('idt_B')

        self.visual_names = visual_names_A + visual_names_B
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', 'R_A', 'R_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # load/define networks
        # The naming convention differs from that used in the paper:
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X), R_A(R_Y), R_B(R_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf,
                                        opt.netG, opt.norm, not opt.no_dropout,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)

        if self.isTrain:
            use_sigmoid = opt.no_lsgan  # the vanilla GAN loss needs a sigmoid; LSGAN does not
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)
            self.netR_A = networks.define_R(opt.input_nc, opt.output_nc,
                                            opt.ndf, opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)
            self.netR_B = networks.define_R(opt.input_nc, opt.output_nc,
                                            opt.ndf, opt.n_layers_D, opt.norm,
                                            use_sigmoid, opt.init_type,
                                            opt.init_gain, self.gpu_ids)

        if self.isTrain:
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(
                self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_R_A = torch.optim.Adam(self.netR_A.parameters(),
                                                  lr=opt.lr,
                                                  betas=(opt.beta1, 0.999))
            self.optimizer_R_B = torch.optim.Adam(self.netR_B.parameters(),
                                                  lr=opt.lr,
                                                  betas=(opt.beta1, 0.999))
            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            self.optimizers.append(self.optimizer_R_A)
            self.optimizers.append(self.optimizer_R_B)

            self.proposal = Proposal()
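            # Assumption based on the DRPAN design: Proposal() uses the
            # discriminator's patch score map to locate the least realistic
            # region of a synthesized image and builds the masked fake/real
            # pairs consumed by the reviser networks in reviser_A/reviser_B.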

    def set_input(self, input):
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        # forward cycle: A -> G_A -> fake_B -> G_B -> rec_A
        self.fake_B = self.netG_A(self.real_A)
        self.rec_A = self.netG_B(self.fake_B)

        # backward cycle: B -> G_B -> fake_A -> G_A -> rec_B
        self.fake_A = self.netG_B(self.real_B)
        self.rec_B = self.netG_A(self.fake_A)

    def backward_D_basic(self, netD, real, fake):
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss: average the real and fake terms so D learns at
        # half the rate of G (the standard CycleGAN heuristic)
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        # backward
        loss_D.backward()
        return loss_D

    def backward_D_A(self):
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def reviser_A(self):
        # training with reviser
        for n_step in range(3):
            fake_B_ = self.netG_A(self.real_A)
            output = self.netD_A(fake_B_.detach())

            # proposal
            self.fake_Br, self.real_Ar, self.fake_Bf, self.real_Af, self.fake_ABf, self.real_ABr = self.proposal.forward_A(
                self.real_B, fake_B_, self.real_A, output)
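            # Output naming is inferred rather than documented here: the *_r
            # tensors are the pairs labelled "real" for the reviser, the *_f
            # tensors the masked fake pairs, and fake_ABf / real_ABr stack
            # input and output so the reviser scores the pair jointly.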
            # train the reviser with the real pair
            self.optimizer_R_A.zero_grad()
            output_r = self.netR_A(self.real_ABr.detach())
            self.loss_errR_real_A = self.criterionGAN(output_r, True)
            self.loss_errR_real_A.backward()

            # train with fake
            output_r = self.netR_A(self.fake_ABf.detach())
            self.loss_errR_fake_A = self.criterionGAN(output_r, False)
            self.loss_errR_fake_A.backward()

            self.loss_R_A = (self.loss_errR_real_A + self.loss_errR_fake_A) / 2
            self.optimizer_R_A.step()

            # train the generator against the reviser
            self.optimizer_G.zero_grad()
            output_r = self.netR_A(self.fake_ABf)
            self.loss_GR_A = self.criterionGAN(output_r, True)
            self.loss_GR_A.backward()
            self.optimizer_G.step()

    def reviser_B(self):
        # training with reviser
        for n_step in range(3):
            fake_A_ = self.netG_B(self.real_B)
            output = self.netD_B(fake_A_.detach())

            # proposal
            self.fake_Ar, self.real_Br, self.fake_Af, self.real_Bf, self.fake_BAf, self.real_BAr = self.proposal.forward_B(
                self.real_A, fake_A_, self.real_B, output)
            # train the reviser with the real pair
            self.optimizer_R_B.zero_grad()
            output_r = self.netR_B(self.real_BAr.detach())
            self.loss_errR_real_B = self.criterionGAN(output_r, True)
            self.loss_errR_real_B.backward()

            # train with fake
            output_r = self.netR_B(self.fake_BAf.detach())
            self.loss_errR_fake_B = self.criterionGAN(output_r, False)
            self.loss_errR_fake_B.backward()

            self.loss_R_B = (self.loss_errR_real_B + self.loss_errR_fake_B) / 2
            self.optimizer_R_B.step()

            # train the generator against the reviser
            self.optimizer_G.zero_grad()
            output_r = self.netR_B(self.fake_BAf)
            self.loss_GR_B = self.criterionGAN(output_r, True)
            self.loss_GR_B.backward()
            self.optimizer_G.step()

    def backward_G(self):
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed.
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(
                self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed.
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(
                self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss
        self.loss_cycle_A = self.criterionCycle(self.rec_A,
                                                self.real_A) * lambda_A
        # Backward cycle loss
        self.loss_cycle_B = self.criterionCycle(self.rec_B,
                                                self.real_B) * lambda_B
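        # Full generator objective (identity terms folded in above):
        #   L_G = L_GAN(G_A, D_A) + L_GAN(G_B, D_B)
        #         + lambda_A * ||G_B(G_A(A)) - A||_1
        #         + lambda_B * ||G_A(G_B(B)) - B||_1
        #         + lambda_idt * (lambda_B * ||G_A(B) - B||_1
        #                         + lambda_A * ||G_B(A) - A||_1)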
        # combined loss
        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
        self.loss_G.backward()

    def optimize_parameters(self):
        # forward
        self.forward()
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()
        self.backward_D_A()
        self.backward_D_B()
        self.optimizer_D.step()
        # R_A and R_B
        self.set_requires_grad([self.netR_A, self.netR_B], True)
        self.optimizer_R_A.zero_grad()
        self.optimizer_R_B.zero_grad()
        self.reviser_A()
        self.reviser_B()
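
A minimal sketch of the outer loop that typically drives
optimize_parameters() in this codebase family; the dataset, the parsed opt
object, and update_learning_rate() are assumptions taken from the usual
CycleGAN training script, not part of the original example.

model = CycleDRPANModel()
model.initialize(opt)                # builds networks, losses and optimizers
for epoch in range(opt.niter + opt.niter_decay):
    for data in dataset:             # each item: {'A', 'B', 'A_paths', 'B_paths'}
        model.set_input(data)        # move the image pair onto the device
        model.optimize_parameters()  # G/D updates plus the reviser steps
    model.update_learning_rate()     # BaseModel scheduler hook (assumed)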