Example #1
    def get_train_data(self):
        mask = np.zeros([self.users_num, self.items_num], dtype=np.float32)
        corrupt_input = np.zeros([self.users_num, self.items_num],
                                 dtype=np.float32)

        for user, pos_items in self.user_pos_train.items():
            pos_len = len(pos_items)
            if self.corrupt_prob == 0:
                corrupt_input[user][pos_items] = 1
            elif self.corrupt_prob == 1:
                pass
            else:
                # corrupt input
                remain_num = int(pos_len * (1 - self.corrupt_prob))
                remain_pos = random_choice(pos_items,
                                           size=remain_num,
                                           replace=False)
                corrupt_input[user][remain_pos] = 1.0 / (1 - self.corrupt_prob)

            # mask loss
            neg_num = pos_len * self.neg_num
            if neg_num < self.items_num - pos_len:
                neg_items = random_choice(self.all_items,
                                          size=neg_num,
                                          replace=False,
                                          exclusion=pos_items)
                mask[user][neg_items.flatten()] = 1
                mask[user][pos_items.flatten()] = 1
            else:
                mask[user][:] = 1
        return csr_matrix(corrupt_input), csr_matrix(mask)
Example #2
    def find_distributions(self, station, clss, kind):
        # Look up the distribution specification once instead of re-querying
        # self.source in every branch.
        source = self.source(station, clss, kind)

        if source == 'NoArrivals':
            return lambda: float('Inf')
        if source[0] == 'Uniform':
            return lambda: uniform(source[1], source[2])
        if source[0] == 'Deterministic':
            return lambda: source[1]
        if source[0] == 'Exponential':
            return lambda: expovariate(source[1])
        if source[0] == 'Normal':
            return lambda: truncated_normal(source[1], source[2])
        if source[0] == 'Custom':
            return lambda: random_choice(array=source[1], probs=source[2])
        if source[0] == 'UserDefined':
            return lambda: self.check_userdef_dist(source[1])
        if source[0] == 'TimeDependent':
            return lambda t: self.check_timedependent_dist(source[1], kind, t)
        if source[0] == 'Empirical':
            if isinstance(source[1], str):
                return lambda: random_choice(
                    self.import_empirical(source[1]))
            return lambda: random_choice(source[1])
Example #3
 def run(self, input):
     plain_blocks = self.splitted(input)
     perturbed_blocks = []
     current_pipeline = random_choice(self.sub_pipelines_weights)
     for pb in plain_blocks:
         if not probability_boolean(self.stickyness):
             current_pipeline = random_choice(self.sub_pipelines_weights)
         perturbed = self.sub_pipelines[current_pipeline].run(pb)
         perturbed_blocks.append(perturbed)
     perturbed_blocks = list(chain(*perturbed_blocks))
     return self.detokenizer.apply(perturbed_blocks)
Example #4
def choose_encounter_for_level(level):
    table = {L0_ENCOUNTER: utils.from_dungeon_level(level, L0_ENCOUNTER_CHANCES),
             L1_ENCOUNTER: utils.from_dungeon_level(level, L1_ENCOUNTER_CHANCES),
             L2_ENCOUNTER: utils.from_dungeon_level(level, L2_ENCOUNTER_CHANCES),
             L3_ENCOUNTER: utils.from_dungeon_level(level, L3_ENCOUNTER_CHANCES),
             L4_ENCOUNTER: utils.from_dungeon_level(level, L4_ENCOUNTER_CHANCES),
             L5_ENCOUNTER: utils.from_dungeon_level(level, L5_ENCOUNTER_CHANCES),
             L6_ENCOUNTER: utils.from_dungeon_level(level, L6_ENCOUNTER_CHANCES),
             L7_ENCOUNTER: utils.from_dungeon_level(level, L7_ENCOUNTER_CHANCES)}
    encounter_level = utils.random_choice(table)
    encounter_table = ENCOUNTERS_TO_ENCOUNTER_TABLES[encounter_level]
    return utils.random_choice(encounter_table)
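Note: here utils.random_choice is used as a weighted picker over a dict that maps outcomes to integer chances. The helper below is only a sketch of that assumed behaviour; the name weighted_choice and its body are illustrative, not the project's actual utility.

import random

def weighted_choice(chances_dict):
    # Pick one key from a {key: integer_weight} mapping, with probability
    # proportional to its weight (illustrative stand-in for utils.random_choice).
    keys = list(chances_dict)
    weights = [chances_dict[k] for k in keys]
    return random.choices(keys, weights=weights, k=1)[0]

# e.g. weighted_choice({'duct_tape': 45, 'extra_battery': 25, 'red_paint': 10, 'emp': 10})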
Example #5
    def take_turn(self):
        if self.is_activated():
            # Launch
            choice = utils.random_choice(self.launch_table)
            if choice == SCOUT:
                fighter_component = Fighter(player=player, hp=10, defense=0, power=0, xp=30, base_speed=75,
                                            death_function=monster_death)
                ai_component = ScoutMonster(activated=True)
                enemy = Object(self.owner.x, self.owner.y, 'S', SCOUT, libtcod.darker_green, blocks=True,
                               fighter=fighter_component, ai=ai_component)
            elif choice == FIGHTER:
                fighter_component = Fighter(player=player, hp=30, defense=0, power=0, xp=50, base_speed=125,
                                            death_function=monster_death)
                ai_component = FighterMonster(activated=True)
                enemy = Object(self.owner.x, self.owner.y, 'F', FIGHTER, libtcod.darker_green, blocks=True,
                               fighter=fighter_component, ai=ai_component)
            objects.append(enemy)
            enemy.path_towards(player.x, player.y, game_map, objects, fov_map)

            # If the player is too close, flak burst
            if self.owner.distance_to(player) <= 4 and self.current_flak_cooldown == 0:
                fire_small_shotgun(caster=self.owner, target=player, spread=5, pellets=30)
                self.current_flak_cooldown += self.flak_cooldown

        if self.current_flak_cooldown > 0:
            self.current_flak_cooldown -= 1
Example #6
def validate(args,
             epoch,
             val_data,
             device,
             model,
             criterion,
             supernet,
             choice=None):
    model.eval()
    val_loss = 0.0
    val_top1 = utils.AvgrageMeter()
    with torch.no_grad():
        for step, (inputs, targets) in enumerate(val_data):
            inputs, targets = inputs.to(device), targets.to(device)
            if supernet:
                if choice is None:
                    choice = utils.random_choice(args.num_choices, args.layers)
                outputs = model(inputs, choice)
            else:
                outputs = model(inputs)
            loss = criterion(outputs, targets)
            val_loss += loss.item()
            prec1, prec5 = utils.accuracy(outputs, targets, topk=(1, 5))
            n = inputs.size(0)
            val_top1.update(prec1.item(), n)
        print('[Val_Accuracy epoch:%d] val_loss:%f, val_acc:%f' %
              (epoch + 1, val_loss / (step + 1), val_top1.avg))
        return val_top1.avg
Example #7
def train(args, epoch, train_loader, model, criterion, optimizer):
    model.train()
    lr = optimizer.param_groups[0]["lr"]
    train_acc = utils.AverageMeter()
    train_loss = utils.AverageMeter()
    steps_per_epoch = len(train_loader)
    for step, (inputs, targets) in enumerate(train_loader):
        inputs, targets = inputs.to(args.device), targets.to(args.device)
        optimizer.zero_grad()
        choice = utils.random_choice(args.num_choices, args.layers)
        outputs = model(inputs, choice)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        prec1, prec5 = utils.accuracy(outputs, targets, topk=(1, 5))
        n = inputs.size(0)
        train_loss.update(loss.item(), n)
        train_acc.update(prec1.item(), n)
        if step % args.print_freq == 0 or step == len(train_loader) - 1:
            logging.info(
                '[Supernet Training] lr: %.5f epoch: %03d/%03d, step: %03d/%03d, '
                'train_loss: %.3f(%.3f), train_acc: %.3f(%.3f)' %
                (lr, epoch + 1, args.epochs, step + 1, steps_per_epoch,
                 loss.item(), train_loss.avg, prec1, train_acc.avg))
    return train_loss.avg, train_acc.avg
Example #8
def train(args, epoch, train_data, device, model, criterion, optimizer,
          scheduler, supernet):
    model.train()
    train_loss = 0.0
    top1 = utils.AvgrageMeter()
    train_data = tqdm(train_data)
    train_data.set_description(
        '[%s%04d/%04d %s%f]' %
        ('Epoch:', epoch + 1, args.epochs, 'lr:', scheduler.get_lr()[0]))
    for step, (inputs, targets) in enumerate(train_data):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        if supernet:
            choice = utils.random_choice(args.num_choices, args.layers)
            outputs = model(inputs, choice)
        else:
            outputs = model(inputs)
        loss = criterion(outputs, targets)
        # if args.dataset == 'cifar10':
        loss.backward()
        # elif args.dataset == 'imagenet':
        #     with amp.scale_loss(loss, optimizer) as scaled_loss:
        #         scaled_loss.backward()
        optimizer.step()
        prec1, prec5 = utils.accuracy(outputs, targets, topk=(1, 5))
        n = inputs.size(0)
        top1.update(prec1.item(), n)
        train_loss += loss.item()
        postfix = {
            'train_loss': '%.6f' % (train_loss / (step + 1)),
            'train_acc': '%.6f' % top1.avg
        }
        train_data.set_postfix(log=postfix)
Example #9
 def get_a_track(self):
     """Randomly pick a particle and
     return its associated list of hits."""
     event_key = random_choice(list(self.event_list.keys()))
     train, truth = self.event_list.get(event_key)
     pID = truth.sample(1).values
     hits = train[train['particle_id'] == pID[0]]['uID'].values
     return hits
Example #10
 def choice_one(idx):
     p_tmp = p[idx] if p is not None else None
     exc = exclusion[idx] if exclusion is not None else None
     return random_choice(array,
                          size[idx],
                          replace=replace,
                          p=p_tmp,
                          exclusion=exc)
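Note: several examples (#1, #10, #18, #20) call random_choice with an exclusion argument and an optional probability vector p. The sketch below is one plausible NumPy-based reading of that signature, offered only as an assumption about how the helper behaves.

import numpy as np

def random_choice_sketch(array, size, replace=True, p=None, exclusion=None):
    # Hypothetical re-implementation: draw `size` items from `array`,
    # optionally without replacement and with probabilities `p`,
    # after removing anything listed in `exclusion`.
    array = np.asarray(array)
    if exclusion is not None:
        keep = ~np.isin(array, exclusion)
        array = array[keep]
        if p is not None:
            p = np.asarray(p, dtype=float)[keep]
            p = p / p.sum()
    return np.random.choice(array, size=size, replace=replace, p=p)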
Example #11
 def _get_element_for_grid(self):
     if self._use_elemental_rule:
         if utils.flip_coin():
             self._element = utils.random_choice([e for e in Element])
             if self._element != Element.NONE:
                 self._has_element = True
         else:
             self._element = Element.NONE
     else:
         self._element = Element.NONE
Example #12
def randomtool(request):
    if request.method == 'POST':
        formitem = request.POST.getlist('formitem[]', None)
        formdescription = request.POST.get('formdescription', '')
        try:
            chosen = random_choice(formitem)
        except Exception:
            return HttpResponse("Invalid input")
        result = formdescription + " : " + chosen
        return HttpResponse(result)
    else:
        return render(request, 'randomtool.html')
Example #13
    def find_next_patient(self):
        next_patient_indices = [
            i for i, ind in enumerate(self.all_patients)
            if ind.service_end_date == self.next_event_date
        ]

        if len(next_patient_indices) > 1:
            next_patient_index = random_choice(next_patient_indices)
        else:
            next_patient_index = next_patient_indices[0]

        return self.all_patients[next_patient_index], next_patient_index
Example #14
 def change_patient_class(self, patient):
     """
     Takes a patient and changes the patient's class
     according to a probability distribution.
     """
     if self.class_change:
         patient.prev_class = patient.patient_class
         patient.patient_class = random_choice(
             range(self.simulation.network.number_of_classes),
             self.class_change[patient.prev_class])
         patient.prev_priority_class = patient.priority_class
         patient.priority_class = self.simulation.network.priority_cls_mapping[
             patient.patient_class]
Example #15
def train(args, epoch, train_data, device, model, criterion, optimizer, scheduler, supernet, choice=None, graft=False):
    model.train()


    ##### queue #########

    input_q = mp.Queue()
    output_q = mp.Queue()

    #####################

    train_loss = 0.0
    top1 = utils.AvgrageMeter()
    train_data = tqdm(train_data)
    eps = args.epochs

    if supernet == 'supernet':
        if choice is not None:
            eps = 50

    train_data.set_description('[%s%04d/%04d %s%f]' % ('Epoch:', epoch + 1, eps, 'lr:', scheduler.get_lr()[0]))

    if graft:
        # i1, j1, i2, j2 and i are assumed to be defined in the enclosing scope
        model.hook_(i1, j1, get_activation_input_by_index(i))
        model.hook_(i2, j2, get_activation_output_by_index(i))

    for step, (inputs, targets) in enumerate(train_data):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        if supernet == 'supernet':
            if choice is None:
                choice = utils.random_choice(args.num_choices, args.layers)
            outputs = model(inputs, choice)
        else:
            outputs = model(inputs)
        loss = criterion(outputs, targets)
        # if args.dataset == 'cifar10':
        loss.backward()
        # elif args.dataset == 'imagenet':
        #     with amp.scale_loss(loss, optimizer) as scaled_loss:
        #         scaled_loss.backward()
        optimizer.step()

        #model.move_to_cpu(choice)
        
        prec1, prec5 = utils.accuracy(outputs, targets, topk=(1, 5))
        n = inputs.size(0)
        top1.update(prec1.item(), n)
        train_loss += loss.item()
        postfix = {'train_loss': '%.6f' % (train_loss / (step + 1)), 'train_acc': '%.6f' % top1.avg}
        train_data.set_postfix(log=postfix)
Example #16
def validate(args, val_loader, model, criterion):
    model.eval()
    val_loss = utils.AverageMeter()
    val_acc = utils.AverageMeter()
    with torch.no_grad():
        for step, (inputs, targets) in enumerate(val_loader):
            inputs, targets = inputs.to(args.device), targets.to(args.device)
            choice = utils.random_choice(args.num_choices, args.layers)
            outputs = model(inputs, choice)
            loss = criterion(outputs, targets)
            prec1, prec5 = utils.accuracy(outputs, targets, topk=(1, 5))
            n = inputs.size(0)
            val_loss.update(loss.item(), n)
            val_acc.update(prec1.item(), n)
    return val_loss.avg, val_acc.avg
Example #17
    def find_next_active_station(self):
        """
        Returns the next active station.
        """
        next_event_date = min(
            [station.next_event_date for station in self.all_stations])

        next_active_station_indices = [
            i for i, station in enumerate(self.all_stations)
            if station.next_event_date == next_event_date
        ]

        if len(next_active_station_indices) > 1:
            return self.all_stations[random_choice(
                next_active_station_indices)]

        return self.all_stations[next_active_station_indices[0]]
Example #18
    def get_training_data(self):
        users = []
        pos_items = []
        neg_items = []
        for u, pos in self.user_pos_train.items():
            pos_len = len(pos)
            neg = random_choice(self.all_items, size=pos_len, exclusion=pos)

            users.extend([u] * pos_len)
            pos_items.extend(pos.tolist())
            neg_items.extend(neg.tolist())

        return DataIterator(users,
                            pos_items,
                            neg_items,
                            batch_size=self.batch_size,
                            shuffle=True)
Example #19
 def default_policy(self):
     # Default policy, used for rollouts.
     rollout_reward = 0
     obs = self.tree.latest_obs
     while not (self.env.won('x') or self.env.won('o') or self.env.draw()):
         # Choose a random action in the rollout.
         possible_actions = self._get_possible_actions()
         random_action = random_choice(list(possible_actions))
         if self.turn:
             next_obs, R, _, _ = self.env.step(random_action, self.player)
         else:
             next_obs, R, _, _ = self.env.step(random_action, \
                 self.other_player)
         obs = next_obs
         rollout_reward += R
         self.turn = not self.turn
     return rollout_reward
Example #20
    def _get_neg_items(self, user):
        pos_item = self.user_pos_train[user]
        pos_len = len(pos_item)
        feed = {self.user_h: [user]}
        logits = self.sess.run(self.all_logits_tensor, feed_dict=feed)
        logits = np.reshape(logits, newshape=[-1])

        neg_pool = random_choice(self.all_items,
                                 size=self.neg_num * pos_len,
                                 exclusion=pos_item)

        neg_logits = logits[neg_pool]

        neg_pool = np.reshape(neg_pool, newshape=[pos_len, self.neg_num])
        neg_logits = np.reshape(neg_logits, newshape=[pos_len, self.neg_num])

        neg_item = neg_pool[np.arange(pos_len), np.argmax(neg_logits, axis=1)]
        return [user] * pos_len, pos_item, neg_item
Example #21
    def __getitem__(self, idx):
        im_src, im_dst, cam_src, cam_dst, _ = self.dataset[idx]
        h, w = im_src.shape[:2]

        im1_ori = torch.from_numpy(im_src)
        im2_ori = torch.from_numpy(im_dst)

        im1_tensor = self.transform(im_src)
        im2_tensor = self.transform(im_dst)

        coord1 = data_utils.generate_query_kpts(im_src, self.config.train_kp,
                                                10 * self.config.num_pts, h, w)

        # if no keypoints are detected
        if len(coord1) == 0:
            return None

        # randomly subsample the query keypoints down to self.config.num_pts
        coord1 = utils.random_choice(coord1, self.config.num_pts)
        coord1 = torch.from_numpy(coord1).float()

        K_src, T_src = cam_src
        K_dst, T_dst = cam_dst

        T_src2dst = torch.from_numpy(T_dst.dot(np.linalg.inv(T_src)))
        F = compute_fundamental_from_poses(K_src, K_dst, T_src, T_dst)
        F = torch.from_numpy(F).float() / (F[-1, -1] + 1e-16)

        out = {
            'im1_ori': im1_ori,
            'im2_ori': im2_ori,
            'intrinsic1': K_src,
            'intrinsic2': K_dst,

            # Additional, for training
            'im1': im1_tensor,
            'im2': im2_tensor,
            'coord1': coord1,
            'F': F,
            'pose': T_src2dst
        }

        return out
Example #22
    def __getitem__(self, idx):
        im1, im2, K_src, K_dst, F = self.dataset[idx]
        h, w = im1.shape[:2]

        im1_ori, im2_ori = torch.from_numpy(im1), torch.from_numpy(im2)

        im1_tensor = self.transform(im1)
        im2_tensor = self.transform(im2)

        coord1 = data_utils.generate_query_kpts(im1, self.config.train_kp,
                                                10 * self.config.num_pts, h, w)

        # if no keypoints are detected
        if len(coord1) == 0:
            return None

        # randomly subsample the query keypoints down to self.config.num_pts
        coord1 = utils.random_choice(coord1, self.config.num_pts)
        coord1 = torch.from_numpy(coord1).float()

        F = torch.from_numpy(F).float() / (F[-1, -1] + 1e-16)

        out = {
            'im1_ori': im1_ori,
            'im2_ori': im2_ori,
            'intrinsic1': K_src,
            'intrinsic2': K_dst,

            # Additional, for training
            'im1': im1_tensor,
            'im2': im2_tensor,
            'coord1': coord1,
            'F': F,

            # Pose is required in the base but not used in CAPSModel
            'pose': np.eye(4)
        }

        return out
Example #23
 def tree_policy(self):
     # Policy that determines how to move through the MCTS tree.
     while not (self.env.won('x') or self.env.won('o') or self.env.draw()):
         # Get current node
         node = self.tree.nodes[self.tree.curr_node_num]
         # Get possible actions
         possible_actions = self._get_possible_actions()
         # Get already performed actions
         already_done = set(node.edges.keys())
         # What have we not tried?
         not_tried = possible_actions - already_done
         # Expand if it is possible to perform a new action
         if len(not_tried) > 0:
             self.expand(node, not_tried)
             self.turn = not self.turn
             return
         else:
             if self.turn:
                 # If our turn, choose what is suggested through UCB
                 action = self.best_action(node, 1)
             else:
                 action = random_choice(list(possible_actions))
             self.move(action)
             self.turn = not self.turn
Example #24
 def random_choice(self) -> List[int]:
     return utils.random_choice(self.nb, self.nl)
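Note: in the supernet examples (#6, #7, #8, #16, #24, #26), utils.random_choice(num_choices, layers) appears to return one candidate-operation index per layer. The one-liner below states that assumption explicitly; it is a guess at the utility's behaviour, not its actual source.

import random
from typing import List

def random_path(num_choices: int, layers: int) -> List[int]:
    # Assumed behaviour: one random choice index per layer, i.e. a single
    # random path through a one-shot supernet.
    return [random.randrange(num_choices) for _ in range(layers)]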
Example #25
    def __getitem__(self, item):
        imf1 = self.imf1s[item]
        imf2 = self.imf2s[item]
        im1_meta = self.images[imf1]
        im2_meta = self.images[imf2]
        im1 = io.imread(imf1)

        if self.args.use_stylization and self.is_stylized_frame[item]:
            imf2_s = osp.join(self.stylization_path, imf2[len(self.args.datadir) + 1:])
            if osp.isfile(imf2_s):
                imf2 = imf2_s

        im2 = io.imread(imf2)

        h, w = im1.shape[:2]

        intrinsic1 = self.get_intrinsics(im1_meta)
        intrinsic2 = self.get_intrinsics(im2_meta)

        extrinsic1 = self.get_extrinsics(im1_meta)
        extrinsic2 = self.get_extrinsics(im2_meta)

        relative = extrinsic2.dot(np.linalg.inv(extrinsic1))
        R = relative[:3, :3]
        # remove pairs that have a relative rotation angle larger than 80 degrees
        theta = np.arccos(np.clip((np.trace(R) - 1) / 2, -1, 1)) * 180 / np.pi
        if theta > 80 and self.phase == 'train':
            return None

        T = relative[:3, 3]
        tx = data_utils.skew(T)
        E_gt = np.dot(tx, R)
        F_gt = np.linalg.inv(intrinsic2).T.dot(E_gt).dot(np.linalg.inv(intrinsic1))

        # generate candidate query points
        coord1 = data_utils.generate_query_kpts(im1, self.args.train_kp, 10*self.args.num_pts, h, w)

        # if no keypoints are detected
        if len(coord1) == 0:
            return None

        # prune query keypoints that are not likely to have correspondence in the other image
        if self.args.prune_kp:
            ind_intersect = data_utils.prune_kpts(coord1,
                                                  F_gt,
                                                  im2.shape[:2],
                                                  intrinsic1,
                                                  intrinsic2,
                                                  relative,
                                                  d_min=4, d_max=400)
            if np.sum(ind_intersect) == 0:
                return None
            coord1 = coord1[ind_intersect]

        coord1 = utils.random_choice(coord1, self.args.num_pts)
        coord1 = torch.from_numpy(coord1).float()

        im1_ori, im2_ori = torch.from_numpy(im1), torch.from_numpy(im2)

        F_gt = torch.from_numpy(F_gt).float() / (F_gt[-1, -1] + 1e-10)
        intrinsic1 = torch.from_numpy(intrinsic1).float()
        intrinsic2 = torch.from_numpy(intrinsic2).float()
        pose = torch.from_numpy(relative[:3, :]).float()
        im1_tensor = self.transform(im1)
        im2_tensor = self.transform(im2)

        out = {'im1': im1_tensor,
               'im2': im2_tensor,
               'im1_ori': im1_ori,
               'im2_ori': im2_ori,
               'pose': pose,
               'F': F_gt,
               'intrinsic1': intrinsic1,
               'intrinsic2': intrinsic2,
               'coord1': coord1}

        return out
Example #26
                                          train=False,
                                          download=False,
                                          transform=valid_transform)
    val_loader = torch.utils.data.DataLoader(valset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=8)

    # random search
    start = time.time()
    best_acc = 0.0
    acc_list = list()
    best_choice = list()
    for epoch in range(args.random_search):
        choice = utils.random_choice(args.num_choices, args.layers)
        top1_acc = validate(args,
                            epoch,
                            val_loader,
                            device,
                            model,
                            criterion,
                            supernet=True,
                            choice=choice)
        acc_list.append(top1_acc)
        if best_acc < top1_acc:
            best_acc = top1_acc
            best_choice = choice
    print('acc_list:')
    for i in acc_list:
        print(i)
Example #27
def train(model, device, args, *, bn_process=True, all_iters=None, reporter=None):
    optimizer = args.optimizer
    loss_function = args.loss_function
    scheduler = args.scheduler
    train_dataprovider = args.train_dataprovider
    task_id = args.task_id
    val_interval = args.val_interval
    display_interval = args.val_interval

    t1 = time.time()
    Top1_err, Top5_err = 0.0, 0.0
    model.train()
    # iters runs from 1 to val_interval (inclusive)
    for iters in range(1, val_interval + 1):
        if bn_process:
            adjust_bn_momentum(model, iters)

        all_iters += 1
        d_st = time.time()

        data, target = train_dataprovider.next()
        target = target.type(torch.LongTensor)
        data, target = data.to(device), target.to(device)
        data_time = time.time() - d_st

        # search space
        if args.block==5:
            # shuffle, 4 choice in one block, 20 choices
            # get_random_cand = lambda:tuple(np.random.randint(4) for i in range(20)) # imagenet
            # get_random_cand = lambda:tuple(np.random.randint(2) for i in range(5)) # cifar
            get_random_cand = lambda:tuple(np.random.randint(args.sample_path) for i in range(args.block)) # cifar
            # uniform
            # flops restriction
            if args.flops_restriction:
                flops_l, flops_r, flops_step = 290, 360, 10
                bins = [[i, i + flops_step] for i in range(flops_l, flops_r, flops_step)]

                # 300 * 1000 000
                def get_uniform_sample_cand(*, timeout=500):
                    idx = np.random.randint(len(bins))
                    l, r = bins[idx]
                    for i in range(timeout):
                        cand = get_random_cand()
                        # if l*1e6 <= get_cand_flops(cand) <= r*1e6:
                        #     return cand
                        return cand
                    return get_random_cand()

                output = model(data, get_uniform_sample_cand())
            else:
                output = model(data, get_random_cand())

        elif args.block==12:
            # s1, sample_choice=1,45s/epoch, sample_choice=3,65s/epoch
            choice = random_choice(path_num=args.choice, m=args.sample_path, layers=args.block)
            output = model(data, choice)

        elif args.block==4:
            # cifar_fast
            # choice = random_choice(path_num=args.choice, m=1, layers=args.block)
            # batch = {'input': data, 'target': target, 'choice': choice}
            batch = {'input': data, 'target': target}
            states = model(batch)
            output = states['logits']

        elif args.block==3:
            # sample # (0,1,2) * 4 == 3^4= 81 # choice, [(0,1,2) (0,1,2)] *2
            # get_random_cand = lambda: tuple(np.random.randint(9) for i in range(2))
            # arch = [get_random_cand() for i in range(2)]
            # 9*9 +
            architecture = [np.random.randint(1) for i in range(2)]
            output = model(data, architecture)

        loss = loss_function(output, target)
        optimizer.zero_grad()
        loss.backward()

        for p in model.parameters():
            if p.grad is not None and p.grad.sum() == 0:
                p.grad = None


        optimizer.step()
        scheduler.step()

        prec1, prec5 = accuracy(output, target, topk=(1, 5))

        Top1_err += 1 - prec1.item() / 100
        Top5_err += 1 - prec5.item() / 100

        if all_iters % display_interval == 0: #20
            # print('{}-task_id: {}, lr: {}'.format(args.signal, args.task_id, args.learning_rate))

            printInfo = '{}-Task_id: {}, Base_lr: {:.2f},\t'.format(args.signal, args.task_id, args.learning_rate) + \
                        'TRAIN Epoch {}: Iters {}, lr = {:.4f}, \tloss = {:.4f},\t'.format(all_iters / display_interval, all_iters, scheduler.get_lr()[0], loss.item()) + \
                        'Top-1 err = {:.4f},\t'.format(Top1_err / display_interval) + \
                        'Top-5 err = {:.4f},\t'.format(Top5_err / display_interval) + \
                        'epoch_train_time = {:.2f}'.format(time.time() - t1)
                        # 'iter_load_data_time = {:.6f},\tepoch_train_time = {:.6f}'.format(data_time, time.time() - t1)

            # printInfo = '{}-Task_id: {}, Base_lr: {:.2f},\t'.format(args.signal, args.task_id, args.learning_rate) + \
            #             'TRAIN Epoch {}: lr = ({}):{:.4f}\{:.4f},\tloss = {:.4f},\t'.format(all_iters / display_interval, len(scheduler.get_lr()), scheduler.get_lr()[0],  scheduler.get_lr()[1],loss.item()) + \
            #             'Top-1 err = {:.4f},\t'.format(Top1_err / display_interval) + \
            #             'Top-5 err = {:.4f},\t'.format(Top5_err / display_interval) + \
            #             'epoch_train_time = {:.2f}'.format(time.time() - t1)
            #             'iter_load_data_time = {:.6f},\tepoch_train_time = {:.6f}'.format(data_time, time.time() - t1)
            logging.info(printInfo)

            t1 = time.time()
            report_top1, report_top5 = 1 - Top1_err/ display_interval, 1 - Top5_err / display_interval

            # print(all_iters / display_interval, report_top1)
            reporter(task_id=task_id, epoch=all_iters / display_interval, train_acc=report_top1)
            # reporter(task_id=task_id, epoch=all_iters / display_interval, train_acc=report_top1, lr_group = args.lr_group if args.lr_group else optimizer.param_groups['initial_lr'])

            Top1_err, Top5_err = 0.0, 0.0
            # lr group log
            for index, param_group in enumerate(optimizer.param_groups):
                logging.info("lr_group:({}/{}),update_lr/base_lr:{:.4f}/{:.4f}".format(index+1, len(scheduler.get_lr()), param_group['lr'], param_group['initial_lr']))


    val_top1_acc = 0
    # initial_lr

    if all_iters % (args.save_interval * val_interval) == 0:
        save_checkpoint(args.path, {'state_dict': model.state_dict(),}, all_iters, tag='{}_Supernet_'.format(task_id))

        latestfilename = os.path.join("{}/models/{}checkpoint-{:06}.pth.tar".format(args.path, '{}_Supernet_'.format(task_id), all_iters))
        logging.info(latestfilename)
        checkpoint = torch.load(latestfilename, map_location=None)
        model.load_state_dict(checkpoint['state_dict'], strict=True)
        val_top1_acc = validate(model, device, args, all_iters=all_iters, architecture=architecture)
        # reporter(task_id=task_id, epoch=all_iters / display_interval, train_acc=val_top1_acc)

    return all_iters, val_top1_acc
Example #28
def place_objects(gm, zone, safe=False):
    num_satellites = utils.from_dungeon_level(dungeon_level, SATELLITES_PER_LEVEL)
    for _ in range(num_satellites):
        (x, y) = zone.random_coordinates()

        if not is_blocked(x, y, gm, objects):
            fighter_component = Fighter(player=player, hp=1, defense=9999, power=0, xp=0,
                                        death_function=projectile_death)
            monster = Object(x, y, '#', 'satellite', libtcod.white, blocks=True, fighter=fighter_component)
            non_interactive_objects.append(monster)
            # TODO: Hack!
            gm[x][y].blocked = True

    if not safe:
        encounter = tables.choose_encounter_for_level(dungeon_level)
    else:
        encounter = tables.EMPTY_ENCOUNTER
    zone.encounter = encounter
    enemies = tables.encounters_to_ship_lists[encounter]
    for choice in enemies:
        (x, y) = zone.random_unblocked_coordinates(gm, objects)

        if choice == SCOUT:
            fighter_component = Fighter(player=player, hp=10, defense=0, power=0, xp=30, base_speed=75,
                                        death_function=monster_death)
            ai_component = ScoutMonster()
            monster = Object(x, y, 'S', SCOUT, libtcod.darker_green, blocks=True, fighter=fighter_component,
                             ai=ai_component)
        elif choice == FIGHTER:
            fighter_component = Fighter(player=player, hp=30, defense=0, power=0, xp=50, base_speed=125,
                                        death_function=monster_death)
            ai_component = FighterMonster()
            monster = Object(x, y, 'F', FIGHTER, libtcod.darker_green, blocks=True, fighter=fighter_component,
                             ai=ai_component)
        elif choice == GUNSHIP:
            fighter_component = Fighter(player=player, hp=50, defense=4, power=3, xp=100, base_speed=100,
                                        death_function=monster_death)
            ai_component = GunshipMonster()
            monster = Object(x, y, 'G', GUNSHIP, libtcod.darker_green, blocks=True, fighter=fighter_component,
                             ai=ai_component)
        elif choice == FRIGATE:
            fighter_component = Fighter(player=player, hp=150, defense=10, power=3, xp=200, base_speed=250,
                                        death_function=monster_death)
            ai_component = FrigateMonster()
            monster = Object(x, y, 'R', FRIGATE, libtcod.darker_green, blocks=True, fighter=fighter_component,
                             ai=ai_component)
        elif choice == DESTROYER:
            fighter_component = Fighter(player=player, hp=200, defense=15, power=0, xp=500, base_speed=300,
                                        death_function=monster_death)
            ai_component = DestroyerMonster()
            monster = Object(x, y, 'D', DESTROYER, libtcod.darker_green, blocks=True,
                             fighter=fighter_component, ai=ai_component)
        elif choice == CRUISER:
            fighter_component = Fighter(player=player, hp=300, defense=10, power=0, xp=1000, base_speed=400,
                                        death_function=monster_death)
            ai_component = CruiserMonster()
            monster = Object(x, y, 'C', CRUISER, libtcod.darker_green, blocks=True,
                             fighter=fighter_component, ai=ai_component)
        elif choice == CARRIER:
            fighter_component = Fighter(player=player, hp=500, defense=0, power=0, xp=2000, base_speed=200,
                                        death_function=monster_death)
            ai_component = CarrierMonster()
            monster = Object(x, y, 'A', CARRIER, libtcod.darker_green, blocks=True,
                             fighter=fighter_component, ai=ai_component)
        elif choice == 'placeholder':
            print('placeholder encounter')
            fighter_component = Fighter(player=player, hp=10, defense=0, power=0, xp=30, base_speed=75,
                                        death_function=projectile_death)
            ai_component = ScoutMonster()
            monster = Object(x, y, 'P', 'placeholder', libtcod.darker_green, blocks=True, fighter=fighter_component,
                             ai=ai_component)

        objects.append(monster)

    max_items = utils.from_dungeon_level(dungeon_level, [[3, 1], [2, 4], [1, 6]])
    item_chances = {ITEM_DUCT_TAPE: 45,
                    ITEM_EXTRA_BATTERY: 25,
                    ITEM_RED_PAINT: 10,
                    ITEM_EMP: 10}

    # Place items
    num_items = libtcod.random_get_int(0, 0, max_items)
    for _ in range(num_items):
        (x, y) = zone.random_unblocked_coordinates(gm, objects)

        choice = utils.random_choice(item_chances)

        if choice == ITEM_DUCT_TAPE:
            item_component = Item(use_function=use_repair_player)
            item = Object(x, y, 't', ITEM_DUCT_TAPE, libtcod.violet, always_visible=True, item=item_component)
        elif choice == ITEM_EXTRA_BATTERY:
            item_component = Item(use_function=boost_player_power)
            item = Object(x, y, 'b', ITEM_EXTRA_BATTERY, libtcod.light_yellow, always_visible=True,
                          item=item_component)
        elif choice == ITEM_EMP:
            item_component = Item(use_function=cast_area_disable)
            item = Object(x, y, 'p', ITEM_EMP, libtcod.light_blue, always_visible=True,
                          item=item_component)
        elif choice == ITEM_RED_PAINT:
            item = Object(x, y, 'r', ITEM_RED_PAINT, libtcod.light_red, always_visible=True,
                          item=Item(use_function=boost_player_speed))

        objects.append(item)
        zone.register_item(item)
        item.send_to_back(objects)
Example #29
 def expand(self, node, not_tried):
     # Create a new node from the given node. Chooses an action from the
     # not_tried set. Also moves to the newly created node.
     random_action = random_choice(list(not_tried))
     self.tree.new_node(random_action)
     return self.move(random_action)
Example #30
    def __getitem__(self, idx):
        # Each epoch, self.ids is read from front to back and each id is used to index self.names
        # idx is the index of the batch; i is the starting offset of that batch
        i = idx * batch_size
        # length is the size of the current batch
        length = min(batch_size, (len(self.names) - i))
        batch_x = np.empty((length, img_rows, img_cols, 3), dtype=np.float32)
        batch_y = np.empty((length, img_rows, img_cols, num_classes),
                           dtype=np.uint8)

        for i_batch in range(length):
            ###normal
            img_name = self.names[i]  # xx.jpg
            img_name_prefix, useless = os.path.splitext(img_name)
            mask_name = img_name_prefix + '.png'

            image_path = os.path.join(rgb_image_path, img_name)
            image = cv2.imread(image_path, 1)
            mask_path = os.path.join(mask_img_path, mask_name)
            mask = cv2.imread(mask_path, 0)

            ###temp
            # img_name = self.names[i] # xx.jpg
            # image_path = os.path.join(rgb_image_path, img_name)
            # image = cv2.imread(image_path,1)

            # img_name_prefix = img_name.split('split')[0][0:-1]
            # mask_name = img_name_prefix+'.png'
            # mask_path = os.path.join(mask_img_path, mask_name)
            # mask = cv2.imread(mask_path,0)
            ##mask = (mask!=0)*255

            # Randomly rescale image and mask by a factor in the range 0.5~2.0
            image, mask = random_rescale_image_and_mask(image, mask)

            # Process the alpha mask on the fly to obtain a trimap with values 128/0/255
            trimap = generate_random_trimap(mask)

            # Define the random crop size
            crop_size = (512, 512)
            # Choose the crop origin so that the cropped region contains unknown (128) pixels
            x, y = random_choice(trimap, crop_size)

            # Crop the image to crop_size and resize it to (img_rows, img_cols)
            image = safe_crop(image, x, y, crop_size)
            # Crop the trimap to crop_size and resize it to (img_rows, img_cols)
            trimap = safe_crop(trimap, x, y, crop_size)

            if np.random.random_sample() > 0.5:
                image = np.fliplr(image)
                trimap = np.fliplr(trimap)

            ### save the image/trimap crop patch
            # patch_save_dir = "show_data_loader"
            # image_patch_path = "show_data_loader" + '/' + img_name_prefix + '_image_' + str(i_batch) + '.png'
            # trimap_patch_path = "show_data_loader" + '/' + img_name_prefix + '_trimap_' + str(i_batch) + '.png'
            # cv2.imwrite(image_patch_path,image)
            # cv2.imwrite(trimap_patch_path,trimap)

            batch_x[i_batch] = image / 255.0
            batch_y[i_batch] = make_trimap_for_batch_y(trimap)

            i += 1

        return batch_x, batch_y
Example #31
 def next_station(self, patient_class):
     return random_choice(array=self.simulation.all_stations[1:],
                          probs=self.transition_row[patient_class] +
                          [1.0 - sum(self.transition_row[patient_class])])