Example 1
 def __init__(self):
     self.skills = {}
     for skill_name in util.skill_to_abil_mod_map.keys():
         self.skills[skill_name] = 0
     self.ability_scores = {}
     for sub_name in util.get_names():
         self.ability_scores[sub_name] = 0
     self.spec = None
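A minimal standalone sketch of the same initialization using dict comprehensions; the skill map and ability names below are made-up stand-ins for the real util contents.

skill_to_abil_mod_map = {"acrobatics": "dex", "athletics": "str"}   # placeholder data
ability_names = ["str", "dex", "con", "int", "wis", "cha"]          # placeholder data

skills = {skill: 0 for skill in skill_to_abil_mod_map}       # every skill starts untrained
ability_scores = {name: 0 for name in ability_names}         # every ability starts at 0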
Example 2
 def serialize(self):
     json_map = {
         "class_": self.class_.__class__.__name__,
         "race": self.race.__class__.__name__,
         "character_name": self.name,
         "level": self.level,
     }
     for sub_name in util.get_names():
         json_map[sub_name] = self.ability_scores.scores[sub_name]
     json_map["trained_skills"] = self.skills.trained.keys()
     json_map["feats"] = list(map(lambda x: x.__class__.__name__, self.feats))
     if self.class_.spec is not None:
         json_map["class_spec"] = self.class_.spec.lstrip("%s_" % self.class_.__class__.__name__)
     return json.dumps(json_map)
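One detail worth noting above: dict_keys is not JSON-serializable, so the trained skill names have to be materialized as a list before json.dumps. A minimal standalone sketch with placeholder values:

import json

trained = {"perception": 5, "stealth": 7}   # placeholder skills
print(json.dumps({"trained_skills": list(trained.keys())}))
# -> {"trained_skills": ["perception", "stealth"]}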
Example 3
 def __repr__(self):
     ret = "level : %s\n" % self.level
     ret += "initiative : %s\n" % self.get_initiative()
     ret += "speed  : %s\n" % self.get_speed()
     for sub_name in util.get_names():
         ret += "%s      : %s\n" % (sub_name, self.get_abil_mod(sub_name))
         ret += "%s(raw) : %s\n" % (sub_name, self.get_raw_abil(sub_name))
     for attr in sorted(util.skill_to_abil_mod_map.keys()):
         ret += "%s : %s\n" % (attr, self.get_skill(attr))
     ret += "AC : %s\n" % self.get_defense("AC")
     ret += "FORT: %s\n" % self.get_defense("fort")
     ret += "REF: %s\n" % self.get_defense("ref")
     ret += "WILL : %s\n" % self.get_defense("will")
     ret += "passive insight : %s\n" % self.get_passive_insight()
     ret += "passive perception : %s\n" % self.get_passive_perception()
     return ret
Example 4
def make_condition() -> str:
    names = util.get_names()

    condition = random.choice(util.cond_temp)
    comp = random.choice(util.comparables)
    vals = []
    # each operand is either an ability name or a random numeric literal
    for _ in range(2):
        if random.randint(0, 1) == 0:
            vals.append(random.choice(names))
        else:
            # numeric literal: integer or float with equal probability
            if random.randint(0, 1) == 0:
                vals.append(str(random.randint(0, 10000000)))
            else:
                vals.append(str(random.uniform(0, 10000000)))
    condition = (condition.replace("{oper}", comp)
                 .replace("{val1}", vals[0])
                 .replace("{val2}", vals[1]))
    return condition
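A standalone sketch of the template substitution used above; the template, comparator, and name lists are assumptions standing in for util.cond_temp, util.comparables, and util.get_names().

import random

cond_templates = ["{val1} {oper} {val2}"]
comparators = ["<", "<=", "==", ">=", ">"]
names = ["strength", "dexterity"]

template = random.choice(cond_templates)
oper = random.choice(comparators)
vals = [random.choice(names), str(random.randint(0, 100))]
print(template.replace("{oper}", oper)
              .replace("{val1}", vals[0])
              .replace("{val2}", vals[1]))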
Example 5
def main():
    names = get_names()
    # track which authors and papers have already been queried (info, co-authors, papers)
    dealt_authors = {}
    dealt_paper = {}
    for name in names:
        authors = api.search_author(name)
        for author in authors:
            try:
                # this author has already been handled
                if author['id'] in dealt_authors:
                    continue
                dealt_authors[author['id']] = True
                # skip this author if it has already been fetched
                if util.has_get(author['id']):
                    logger.debug('author has get, author_id=%s', author['id'])
                    continue
                # fetch the author's profile
                info = api.get_author(author['id'])
                db.upsert_author(info)
                # queue lookups for the co-authors
                for co in info['co-authors']:
                    if co['id'] in dealt_authors:
                        continue
                    util.start_get_author_thread(co['id'])
                # queue lookups for the papers
                for p in info['papers']:
                    # skip papers that have already been queried
                    if p['id'] in dealt_paper:
                        continue
                    dealt_paper[p['id']] = True
                    util.start_get_paper_thread(p['id'], author['id'])
            except Exception as e:
                logger.debug('got an error in an author query: %s', e)
            # pause 1 s after each author
            time.sleep(1)
        # pause 10 s after each name
        time.sleep(10)
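The bookkeeping above uses dictionaries of True values purely as membership tests; a set expresses the same dedup pattern more directly. A minimal standalone sketch with made-up ids:

dealt_authors = set()
for author_id in ["a1", "a2", "a1", "a3"]:
    if author_id in dealt_authors:
        continue                      # already handled, skip
    dealt_authors.add(author_id)
    print("processing", author_id)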
Example 6
File: train.py Project: Onojimi/try
def train_net(
    net,
    writer,
    load,
    epochs=5,
    batch_size=1,
    lr=0.1,
    val_percent=0.1,
    save_cp=False,
    gpu=True,
):

    image_dir = 'train/images_cut/'
    mask_dir = 'train/masks_cut/'
    checkpoint_dir = 'checkpoints/'

    name_list = get_names(image_dir)
    split_list = train_val_split(name_list, val_percent)

    print('''
        Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(split_list['train']),
               len(split_list['val']), str(save_cp), str(gpu)))
    N_train = len(split_list['train'])
    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=0.005)

    # load pretrained weights; note the path comes from the global CLI args
    # (args.load), not from the `load` argument of this function
    model_dict = net.state_dict()
    pretrained_dict = torch.load(args.load)
    #        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    net.load_state_dict(model_dict)
    print('Model loaded from {}'.format(args.load))
    train_params = []
    if args.fix:
        print("fixing parameters")
        # freeze the first two conv blocks; train_params keeps the names of the
        # parameters that remain trainable
        for k, v in net.named_parameters():
            train_params.append(k)
            if k.startswith(('module.conv1', 'module.conv2')):
                v.requires_grad = False
                train_params.remove(k)

        optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                      net.parameters()),
                               lr=lr,
                               weight_decay=0.005)

    criterion = mixloss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        train = get_train_pics(image_dir, mask_dir, split_list)

        epoch_loss = 0

        for i, samps in enumerate(batch(train, batch_size)):
            images = np.array([samp['image'] for samp in samps])
            masks = np.array([samp['mask'] for samp in samps])

            images = torch.from_numpy(images).type(torch.FloatTensor)
            masks = torch.from_numpy(masks).type(torch.FloatTensor)

            if gpu:
                images = images.cuda()
                true_masks = masks.cuda()
            else:
                true_masks = masks

            masks_pred = net(images)

            masks_probs_flat = masks_pred.view(-1)
            true_masks_flat = true_masks.view(-1)
            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # i is zero-based, so the number of batches is i + 1
        avg_train_loss = epoch_loss / (i + 1)
        print('Epoch finished ! Loss: {}'.format(avg_train_loss))

        val = get_val_pics(image_dir, mask_dir, split_list)

        val_iou, val_ls = eval_net(net, val, gpu)
        print('Validation IoU: {} Loss: {}'.format(val_iou, val_ls))

        writer.add_scalar('train/loss', avg_train_loss, epoch)
        writer.add_scalar('val/loss', val_ls, epoch)
        writer.add_scalar('val/IoU', val_iou, epoch)

        if save_cp:
            torch.save(net.state_dict(),
                       checkpoint_dir + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved!'.format(epoch + 1))
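A standalone sketch of the parameter-freezing pattern used in train_net above: turn off gradients for selected layers, then build the optimizer over the remaining trainable parameters only. The toy Sequential network and layer names are assumptions, not the project's model.

import torch.nn as nn
import torch.optim as optim

net = nn.Sequential(nn.Conv2d(1, 8, 3), nn.Conv2d(8, 1, 3))
for name, param in net.named_parameters():
    if name.startswith('0.'):         # freeze the first conv layer
        param.requires_grad = False

optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),
                       lr=0.1, weight_decay=0.005)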