Example #1
0
    def gameOver(self):
        """Reset all per-match state and return to the main menu.

        Clears mob/tower sprite lists (breaking cross-references first so the
        objects can be garbage-collected), resets UI selection state, stops
        the recurring clock callbacks, and shows the game-over menu.
        """
        logger.info("Game lost, returning to menu.")
        self.loaded = False
        self.paused = True
        # Break cross-references before dropping the lists so sprites free up.
        for m in self.mobs:
            m.debuff_list = []
        self.mobs = []  # Enemy mob sprite objects
        self.mob_count = 0  # This serve as mob's id
        for t in self.towers:
            t.target = None
        self.goal, self.spawn = None, None
        self.towers = []  # Tower sprite objects
        self.window.animations = []
        self.selected_mouse = None
        self.dragging = False
        self.highlighted = []
        self.active_tower = None
        self.mouse_drag_tower = None
        self.pf_queue = []

        # Stop the recurring game-loop callbacks.  A callback may not be
        # currently scheduled, in which case unschedule may raise; the
        # original used three copy-pasted bare ``except:`` blocks, which
        # also swallowed KeyboardInterrupt/SystemExit -- narrowed here.
        for callback in (self.aiIncome, self.updateState,
                         self.autospawnBalanced):
            try:
                pyglet.clock.unschedule(callback)
            except Exception:
                pass

        self.window.flushWindow()
        self.window.showGameOverMenu()
Example #2
0
 def token_exists(self, token):
     """Return True iff *token* is present in userdb.tokens."""
     if not self._rw_userdb:
         logger.info("no attempt to check if token exists, not enough privileges")
         return False
     query = SQL("SELECT 1 FROM userdb.tokens WHERE id = %s")
     cursor = self._execute(query, [token])
     return cursor.rowcount == 1
Example #3
0
    def new_user(self, uid, pwd=None, full_name=None, about=None, url=None):
        """
        generates a new user, asks for the password interactively,
        and stores it in the DB. This is now replaced with bcrypt version

        :param uid: username to create
        :param pwd: plaintext password; prompted for interactively when None
        :returns: the new LmfdbUser, or LmfdbAnonymousUser without privileges
        :raises Exception: if the user exists or the passwords do not match
        """
        if not self._rw_userdb:
            logger.info("no attempt to create user, not enough privileges")
            return LmfdbAnonymousUser()

        if self.user_exists(uid):
            raise Exception("ERROR: User %s already exists" % uid)
        if not pwd:
            # BUG FIX: this prompt block was mangled in the source (string
            # redaction artifacts); reconstructed as enter/repeat/compare.
            from getpass import getpass
            pwd_input = getpass("Enter  Password: ")
            if pwd_input != getpass("Repeat Password: "):
                raise Exception("ERROR: Passwords do not match!")
            pwd = pwd_input
        password = self.bchash(pwd)
        from datetime import datetime
        #TODO: use identifiers
        insertor = SQL(u"INSERT INTO userdb.users (username, bcpassword, created, full_name, about, url) VALUES (%s, %s, %s, %s, %s, %s)")
        self._execute(insertor, [uid, password, datetime.utcnow(), full_name, about, url])
        new_user = LmfdbUser(uid)
        return new_user
Example #4
0
 def aiIncome(self, dt):
     """Clock callback: grant the AI its periodic gold income while unpaused.

     Flat income grows by 1 each tick; a bonus of 10% of the player's total
     value plus gold is added on top.  Above 2000 gold the AI unlocks mob
     tier 2.
     """
     if self.paused:
         return
     self.ai_gold += self.ai_flat_income
     self.ai_flat_income += 1
     self.ai_gold += (self.getTotalValue() + self.gold) // 10
     if self.ai_gold > 2000:
         self.mobtier = 2
     logger.info("AI current gold: {0}".format(self.ai_gold))
Example #5
0
 def change_password(self, uid, newpwd):
     """Store a new bcrypt hash for user *uid* (requires write privileges)."""
     if not self._rw_userdb:
         logger.info("no attempt to change password, not enough privileges")
         return
     hashed = self.bchash(newpwd)
     #TODO: use identifiers
     updater = SQL("UPDATE userdb.users SET (bcpassword) = (%s) WHERE username = %s")
     self._execute(updater, [hashed, uid])
     logger.info("password for %s changed!" % uid)
def stats():
    """Fetch the configured StackOverflow user's profile and return it as a
    JSON string (also logs the first item of the response)."""
    url = ('https://api.stackexchange.com/2.2/users/{}?order=desc&'
           'sort=reputation&site=stackoverflow&filter={}'.format(
               app.config['STACKOVERFLOW_USER_ID'],
               '!0Z-LvhH.LNOKu1BHWnIjY_iHH'))
    response = http.request('GET', url)
    payload = json.loads(response.data)
    logger.info('stackoverflow info: {}'.format(payload['items'][0]))
    return json.dumps(payload)
Example #7
0
 def delete_old_tokens(self):
     """Delete tokens whose expiry is more than 8 days in the past."""
     if not self._rw_userdb:
         logger.info("no attempt to delete old tokens, not enough privileges")
         return
     cutoff = datetime.utcnow() - timedelta(days=8)
     statement = SQL("DELETE FROM userdb.tokens WHERE expire < %s")
     self._execute(statement, [cutoff])
Example #8
0
def stats():
    """
    :returns: number of followers and number of tweets
    """
    service = client()
    timeline = service.statuses.user_timeline()
    user = timeline[0]['user']
    followers = user['followers_count']
    tweets = user['statuses_count']
    logger.info('you have {0} followers and {1} tweets'.format(
        followers, tweets))
    return followers, tweets
Example #9
0
    def save(self, data):
        """Persist a user record to userdb.users.

        *data* must be a mapping containing at least 'username'; every other
        key/value pair is written to the matching column for that user.
        Raises ValueError on a missing username, unknown user, or empty data.
        """
        if not self._rw_userdb:
            logger.info("no attempt to save, not enough privileges")
            return

        payload = dict(data)  # work on a copy, leave the caller's dict alone
        uid = payload.pop("username", None)
        if not uid:
            raise ValueError("data must contain username")
        if not self.user_exists(uid):
            raise ValueError("user does not exist")
        if not payload:
            raise ValueError("no data to save")
        fields, values = zip(*payload.items())
        columns = SQL(", ").join(map(Identifier, fields))
        slots = SQL(", ").join(Placeholder() * len(values))
        updater = SQL("UPDATE userdb.users SET ({0}) = ({1}) WHERE username = %s").format(columns, slots)
        self._execute(updater, list(values) + [uid])
Example #10
0
def scrobbled():
    """no of tracks scrobbled
    """
    params = {
        'method': 'user.getRecentTracks',
        'user': app.config['LASTFM_USER'],
        'api_key': app.config['LASTFM_API_KEY'],
        'format': 'json',
    }
    response = http.request('GET', 'http://ws.audioscrobbler.com/2.0',
                            fields=params)
    payload = json.loads(response.data.decode('utf8'))
    # interested in the total for now, till "'from': 'date' is used in request
    total = int(payload['recenttracks']['@attr']['total'])
    logger.info('lastfm tracks scrobbled: {}'.format(total))
    return total
Example #11
0
    def authenticate(self, uid, pwd, bcpass=None, oldpass=None):
        """Verify *pwd* for user *uid* against userdb.users; True on success.

        Bcrypt hashes are checked directly.  Legacy (pre-bcrypt) hashes are
        tried for every salt round in [self.rmin, self.rmax]; on a match the
        stored hash is transparently upgraded to bcrypt.

        :raises ValueError: if *uid* is not present in the database.

        NOTE(review): the ``bcpass``/``oldpass`` parameters are overwritten
        by the SELECT result below before first use, so caller-supplied
        values are ignored -- confirm whether they can be dropped from the
        signature without breaking callers.
        """
        if not self._rw_userdb:
            logger.info("no attempt to authenticate, not enough privileges")
            return False

        #TODO: use identifiers
        selecter = SQL("SELECT bcpassword, password FROM userdb.users WHERE username = %s")
        cur = self._execute(selecter, [uid])
        if cur.rowcount == 0:
            raise ValueError("User not present in database!")
        bcpass, oldpass = cur.fetchone()
        if bcpass:
            # Modern path: bchash re-hashes with the stored hash's salt.
            if bcpass == self.bchash(pwd, existing_hash = bcpass):
                return True
        else:
            # Legacy path: try every historical salt round.
            for i in range(self.rmin, self.rmax + 1):
                if oldpass == self.hashpwd(pwd, str(i)):
                    bcpass = self.bchash(pwd)
                    if bcpass:
                        logger.info("user " + uid  +  " logged in with old style password, trying to update")
                        try:
                            #TODO: use identifiers
                            updater = SQL("UPDATE userdb.users SET (bcpassword) = (%s) WHERE username = %s")
                            self._execute(updater, [bcpass, uid])
                            logger.info("password update for " + uid + " succeeded")
                        except Exception:
                            #if you can't update the password then likely someone is using a local install
                            #log and continue
                            logger.warning("password update for " + uid + " failed!")
                        return True
                    else:
                        logger.warning("user " + uid + " logged in with old style password, but update was not possible")
                        return False
        return False
Example #12
0
 def authenticate(self, pwd):
     """
     checks if the given password for the user is valid.
     @return: True: OK, False: wrong password.

     Bcrypt hashes are compared directly; legacy hashes are tried for every
     salt round in [rmin, rmax] and transparently upgraded to bcrypt on a
     successful login.
     """
     # from time import time
     # t1 = time()
     if 'password' not in self._data and 'bcpassword' not in self._data:
         logger.warning("no password data in db for '%s'!" % self._uid)
         return False
     bcpass = self._data.get('bcpassword', None)
     if bcpass:
         # Modern path: bchash re-hashes with the stored hash's salt.
         if bcpass == bchash(pwd, existing_hash = bcpass):
             self._authenticated = True
     else:
         # Legacy path: try every historical salt round.
         for i in range(rmin, rmax + 1):
             if self._data['password'] == hashpwd(pwd, str(i)):
                 # log "AUTHENTICATED after %s!!" % (time() - t1)
                 bcpass = bchash(pwd)
                 if bcpass:
                     logger.info("user " + self._uid  +  " logged in with old style password, trying to update")
                     try:
                         self._data['bcpassword'] = bcpass
                         get_users().update_one({'_id': self._uid},
                                            {'$set':{'bcpassword':bcpass}})
                         logger.info("password update for " + self._uid + " succeeded")
                     except Exception:
                         # if you can't update the password then likely someone
                         # is using a local install -- log and continue.
                         # BUG FIX: the original bare ``except:`` also
                         # swallowed KeyboardInterrupt/SystemExit; narrowed.
                         logger.warning("password update for " + self._uid + " failed!")
                     self._authenticated = True
                 else:
                     logger.warning("user " + self._uid + " logged in with old style password, but update was not possible")
                     self._authenticated = False
                 break
     # NOTE(review): assumes self._authenticated was initialised (e.g. in
     # __init__); on a wrong bcrypt password no assignment happens here.
     return self._authenticated
def delHost(id):
    """Delete the host with the given id and log which user did it."""
    operator_id = session.get("user")[u'id']
    target = hostService.get(id)
    hostService.delete(target)
    logger.info("userId is %s del host,hostInfo is %s", operator_id, str(target))
    return jsonify(dict(code=200))
Example #14
0
def train(config):
    """Fine-tune a BERT sequence classifier according to *config*.

    Reads data/model/optimizer sections from the config file, optionally
    trains with gradient accumulation (and optional fp16 loss scaling /
    CPU-side optimizer state), evaluates F1 after each epoch with early
    stopping after 6 epochs without improvement, then reloads the best
    checkpoint and runs the test set.
    """
    cfg, cfg_data, cfg_model, cfg_optim = read_config(config)

    device, n_gpu = utils.get_device()
    utils.set_seeds(cfg.seed, n_gpu)

    # Per-step batch size shrinks with gradient accumulation so the
    # effective batch size stays cfg_optim.train_batch_size.
    train_batch_size = int(cfg_optim.train_batch_size /
                           cfg_optim.gradient_accumulation_steps)

    processor = get_class(cfg.task.lower())

    tokenizer = BertTokenizer.from_pretrained(cfg.bert_model,
                                              do_lower_case=cfg.do_lower_case)

    train_examples = None
    num_train_steps = None
    if cfg.do_train:
        train_examples = processor.get_train_examples(cfg_data.data_dir)
        num_train_steps = int(
            len(train_examples) / train_batch_size /
            cfg_optim.gradient_accumulation_steps * cfg_optim.num_train_epochs)

    label_list = processor.get_labels()
    # Prepare model
    print(PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(-1))
    model = BertForSequenceClassification.from_pretrained(
        cfg.bert_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(-1),
        num_labels=len(label_list))

    model.to(device)

    # Prepare optimizer: keep a detached CPU copy of the parameters when
    # optimizing on CPU (used together with fp16 training).
    if cfg_optim.optimize_on_cpu:
        param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_()) \
                           for n, param in model.named_parameters()]
    else:
        param_optimizer = list(model.named_parameters())

    # No weight decay on biases / LayerNorm (gamma, beta) parameters.
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay_rate':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay_rate':
        0.0
    }]
    t_total = num_train_steps

    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=cfg_optim.learning_rate,
                         warmup=cfg_optim.warmup_proportion,
                         t_total=t_total)

    global_step = 0
    if cfg.do_train:
        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      cfg_optim.max_seq_length,
                                                      tokenizer,
                                                      show_exp=False)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        train_dataloader = convert_features_to_tensors(train_features,
                                                       train_batch_size)

        model.train()
        best_score = 0
        flags = 0  # epochs since the last F1 improvement (early stopping)
        for _ in trange(int(cfg_optim.num_train_epochs), desc="Epoch"):
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if cfg_optim.fp16 and cfg_optim.loss_scale != 1.0:
                    # rescale loss for fp16 training
                    # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
                    loss = loss * cfg_optim.loss_scale
                if cfg_optim.gradient_accumulation_steps > 1:
                    loss = loss / cfg_optim.gradient_accumulation_steps
                loss.backward()

                if (step + 1) % cfg_optim.gradient_accumulation_steps == 0:
                    if cfg_optim.optimize_on_cpu:
                        if cfg_optim.fp16 and cfg_optim.loss_scale != 1.0:
                            # scale down gradients for fp16 training
                            for param in model.parameters():
                                if param.grad is not None:
                                    param.grad.data = param.grad.data / cfg_optim.loss_scale
                        is_nan = utils.set_optimizer_params_grad(
                            param_optimizer,
                            model.named_parameters(),
                            test_nan=True)
                        if is_nan:
                            logger.info(
                                "FP16 TRAINING: Nan in gradients, reducing loss scaling"
                            )
                            cfg_optim.loss_scale = cfg_optim.loss_scale / 2
                            model.zero_grad()
                            continue
                        optimizer.step()
                        utils.copy_optimizer_params_to_model(
                            model.named_parameters(), param_optimizer)
                    else:
                        optimizer.step()
                    model.zero_grad()

            f1 = evaluate(model, processor, cfg_optim, label_list, tokenizer,
                          device)
            if f1 > best_score:
                best_score = f1
                print('*f1 score = {}'.format(f1))
                flags = 0
                checkpoint = {'state_dict': model.state_dict()}
                torch.save(checkpoint, cfg_optim.model_save_pth)
            else:
                print('f1 score = {}'.format(f1))
                flags += 1
                if flags >= 6:
                    break

    # BUG FIX: the checkpoint is saved under cfg_optim.model_save_pth above,
    # but was loaded here from cfg.model_save_pth -- made consistent.
    model.load_state_dict(torch.load(cfg_optim.model_save_pth)['state_dict'])
    test(model, processor, cfg_optim, label_list, tokenizer, device)
def delDict(id):
    """Delete the dictionary entry with the given id and log the operator."""
    opertorId = session.get("user")[u'id']
    dictM = dictService.get(id)
    # BUG FIX: the entry is fetched via dictService but was deleted via
    # userService (copy-paste from delUser); delete through dictService.
    dictService.delete(dictM)
    logger.info("userId is %s del dict,dictInfo is %s", opertorId, str(dictM))
    return jsonify(dict(code=200))
def delUser(id):
    """Delete the user with the given id and log which user did it."""
    operator_id = session.get("user")[u'id']
    target = userService.get(id)
    userService.delete(target)
    logger.info("userId is %s del user,userInfo is %s", operator_id, str(target))
    return jsonify(dict(code=200))
Example #17
0
 def safe_delete(file_path):
   """Remove *file_path* if it is an existing regular file; no-op otherwise."""
   # NOTE(review): check-then-remove is not atomic; a concurrent delete
   # between the check and os.remove would still raise -- confirm acceptable.
   if not os.path.isfile(file_path):
     return
   os.remove(file_path)
   logger.info("deleted file {}".format(file_path))
Example #18
0
 def run(self):
   """Emit one heartbeat log line and increment the heartbeat counter."""
   # counter accumulates the number of heartbeats fired so far.
   self.counter += 1
   # NOTE(review): "internal" is presumably the heartbeat interval in
   # seconds (likely a typo for "interval") -- confirm at the definition.
   logger.info("Heartbeat for every {} seconds. Total time is {}".format(self.internal, self.counter))
Example #19
0
from settings import MASTERS
from settings import MULTIM_PATH
from main import logger
from time import sleep
from tempfile import NamedTemporaryFile
from os import remove

import youtube_dl

import cleverbot
cb = cleverbot.CleverBot(user='******', key='Ueps0IU4HHVsvQUJ8F2L5NbTcYAiq5Xz', nick='discord')

from gtts import gTTS

if not discord.opus.is_loaded():
    logger.info('Opus not loaded')
    print('Opus not loaded')

class playback(object):
    def __init__(self, bot):
        self.bot = bot 
        self.voice = None
        self.player = None 
    @commands.command(pass_context=True)
    async def join(self, ctx):
        if ctx.message.author.id in MASTERS:
            channame = remove_command(ctx.message.content)
            for channel in ctx.message.server.channels:
                if channel.type == discord.ChannelType.voice and channel.name == channame.strip():
                    self.voice = await self.bot.join_voice_channel(channel)
        else:
Example #20
0
def cancel(bot, update):
    """Politely end the conversation and remove the custom reply keyboard."""
    sender = update.message.from_user
    update.message.reply_text('Bye! I hope we can talk again some day.',
                              reply_markup=ReplyKeyboardRemove())
    logger.info("User %s canceled the conversation.", sender.first_name)
    return ConversationHandler.END
def db_load_profile_description(profile_id: str) -> bool:
    """Refresh the stored description, categories and languages of a profile.

    Scrapes the profile page, deletes any previously stored rows for
    *profile_id* and inserts the fresh data.

    :param profile_id: site identifier of the profile to (re)load
    :returns: False when the profile was deleted on the site, True otherwise
    """
    from main import logger

    # check profile delete status
    if not db_profile_available(profile_id=profile_id):
        logger.info(f'User {current_user} opened load of profile '
                    f'description for profile_id: {profile_id},'
                    f'but this profile deleted on site')
        return False
    else:
        logger.info(f'User {current_user} opened load of profile '
                    f'description for profile_id: {profile_id}')
    db_session = Session()
    # collect info from site
    profile_details = collect_info_from_profile(profile_id=profile_id)

    # log the deletion only when there actually was an old description
    if db_duplicate_check([ProfileDescription],
                          ProfileDescription.profile_id == profile_id):
        logger.info(f'User {current_user} deleted old profile '
                    f'description for profile_id: {profile_id}')
    # delete profile description
    db_delete_rows([ProfileDescription],
                   ProfileDescription.profile_id == profile_id)
    # delete old category level
    db_delete_rows([ProfileCategories],
                   ProfileCategories.profile_id == profile_id)
    # delete old languages
    db_delete_rows([ProfileLanguages],
                   ProfileLanguages.profile_id == profile_id)

    # create new profile info
    new_pr_desc = ProfileDescription(profile_id=profile_id)
    # iterate key/value pairs directly instead of indexing list(keys())
    for key, val in profile_details.items():
        # normalise the site's "not specified" placeholder to NULL
        if isinstance(val, str) and val.lower() == 'not specified':
            val = None
        if key == 'Languages':
            # add new languages (val is a mapping language -> level)
            for language, level_name in profile_details[key].items():
                # check if user not specified languages
                if language.lower() == 'not specified':
                    continue
                elif level_name.lower() == 'not specified':
                    level_name = None

                new_lang_lvl = ProfileLanguages(profile_id=profile_id,
                                                language=language,
                                                level_name=level_name)
                db_session.add(new_lang_lvl)
                db_session.commit()
            continue

        # check if key in categories table
        elif db_duplicate_check([Categories],
                                Categories.category_name == key.lower()):
            # add new category levels
            new_category_lvl = ProfileCategories(category_name=key.lower(),
                                                 profile_id=profile_id,
                                                 level_name=val)
            db_session.add(new_category_lvl)
            db_session.commit()
            continue
        # everything else maps straight onto a ProfileDescription column
        setattr(new_pr_desc, key.lower(), val)
    db_session.close()
    # NOTE(review): a fresh session is opened for the final insert, as in
    # the original -- confirm whether one session could be reused throughout.
    db_session = Session()
    db_session.add(new_pr_desc)
    db_session.commit()
    logger.info(f'User {current_user} added profile '
                f'description for profile_id: {profile_id}')
    db_session.close()
    return True
Example #22
0
def change_password(uid, newpwd):
    """Replace the stored bcrypt password hash for user *uid*."""
    hashed = bchash(newpwd)
    get_users().update_one({'_id': uid}, {'$set': {'bcpassword': hashed}})
    logger.info("password for %s changed!" % uid)
def create_invite(creator: User, invited_email: str, role: str) -> bool:
    """Create (or re-send) an invite for *invited_email* with the given role.

    Handles three cases: a previously deleted user is revived with a new
    invite-derived password; an existing unregistered user gets the invite
    re-sent (updating the role if it changed and is valid); a brand-new
    user/role/invite record set is created otherwise.

    :param creator: the user sending the invite (sender of SentInvites)
    :param invited_email: login/e-mail of the invitee
    :param role: role name to assign; must exist in UserRoles
    :returns: True when an invite was created or re-sent, False when the
        invitee is already registered or the role is unknown
    """
    from main import logger
    db_session = Session()
    invite = Invites(invite_id=uuid4().bytes)

    # new user creating
    # NOTE(review): invite.invite_id is raw bytes; presumably
    # generate_password_hash accepts bytes -- confirm.
    new_user = Users(login=invited_email,
                     user_password=generate_password_hash(invite.invite_id,
                                                          "sha256",
                                                          salt_length=8))
    # assign role to user
    new_user_role = RolesOfUsers(login=invited_email, user_role=role)
    # create invite from user
    sent_invite_from = SentInvites(invite_id=invite.invite_id,
                                   login=creator.login)
    # create invite to user
    sent_invite_to = SentInvites(invite_id=invite.invite_id,
                                 login=new_user.login)

    # database duplicate check
    users = db_get_users(Users.login == invited_email)
    if len(users) > 0:
        if users[0]['role'] == 'deleted':
            # Revive a soft-deleted account with a fresh invite.
            # NOTE(review): this rebinds db_session without closing the
            # session opened above -- confirm the first one is not leaked.
            db_session = Session()
            # create new invite id
            db_session.add(invite)
            db_session.commit()
            # create new users-invite link
            db_session.add(sent_invite_from)
            db_session.add(sent_invite_to)
            db_session.commit()
            # change password from deleted to new, based on invite_id
            update_q = update(Users).where(
                    Users.login == invited_email). \
                values(user_password=new_user.user_password)
            db_session.execute(update_q)
            db_session.commit()
            db_session.close()
            # reload users, for next checks
            users = db_get_users(Users.login == invited_email)
        # user already created
        if users[0]['register_status']:
            # user already registered
            logger.info(f'User {current_user.login} '
                        f'tried to create invite for '
                        f'already registered user: {invited_email}')
            return False
        if users[0]['role'] != role:
            # user role another from db role
            # check user role is valid
            query = db_session.query(UserRoles).filter(
                UserRoles.user_role == role)
            if len(query.all()) == 0:
                return False
            update_q = update(RolesOfUsers).where(
                    RolesOfUsers.login == invited_email). \
                values(user_role=role)
            db_session.execute(update_q)
            db_session.commit()
            logger.info(f'User {current_user.login} '
                        f'update role for unregistered user: {invited_email}')

        logger.info(f'User {current_user.login} '
                    f'resend invite to: {invited_email}')
        return True
    else:
        # no user in DB: persist invite, user, role and both invite links
        db_session.add(invite)
        db_session.commit()
        db_session.add(new_user)
        db_session.commit()
        db_session.add(new_user_role)
        db_session.commit()
        db_session.add(sent_invite_from)
        db_session.add(sent_invite_to)
        db_session.commit()
        db_session.close()
        logger.info(f'created invite for e-mail: {invited_email}')
        return True
Example #24
0
def convert_examples_to_features(examples,
                                 label_list,
                                 max_seq_length,
                                 tokenizer,
                                 show_exp=True):
    '''Loads a data file into a list of `InputBatch`s.

    Args:
        examples      : [List] input samples, each with text_a, optional
                        text_b, an optional label and a guid
        label_list    : [List] every possible label (int, str, ...)
        max_seq_length: [int] maximum sequence length after tokenization
        tokenizer     : [Method] tokenization method (wordpiece for BERT)
        show_exp      : [bool] when True, log the first five examples

    Returns:
        features: list of InputFeatures holding
            input_ids  : token ids (one word vector per wordpiece)
            input_mask : 1 for real tokens, 0 for padding
            segment_ids: 0 for sentence A, 1 for sentence B
            label_id   : label mapped to its index in label_list (or None)
    '''
    label_map = {label: idx for idx, label in enumerate(label_list)}

    features = []
    for ex_index, example in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

        if tokens_b:
            # Pair input: reserve room for [CLS], [SEP], [SEP] ("- 3");
            # truncates tokens_a / tokens_b in place.
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        elif len(tokens_a) > max_seq_length - 2:
            # Single input: reserve room for [CLS] and [SEP] ("- 2").
            tokens_a = tokens_a[0:(max_seq_length - 2)]

        # BERT convention: sentence A carries type id 0, sentence B type id 1.
        # The [CLS] vector is later used as the sentence representation.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens = tokens + tokens_b + ["[SEP]"]
            segment_ids = segment_ids + [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad all three sequences up to max_seq_length.
        pad_len = max_seq_length - len(input_ids)
        input_ids = input_ids + [0] * pad_len
        input_mask = input_mask + [0] * pad_len
        segment_ids = segment_ids + [0] * pad_len

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        label_id = label_map[example.label] if example.label is not None else None

        if ex_index < 5 and show_exp:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x)
                                                    for x in input_ids]))
            logger.info("input_mask: %s" %
                        " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s" %
                        " ".join([str(x) for x in segment_ids]))
            if example.label is not None:
                logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features
Example #25
0
def register_pinger_result():
    """
    Register a pinger results in this master node
    The results came in JSON format and are related to the iperf
    session performed on the pinger node
    :return: JSON {'result': 'success'|'failure', ...}; on failure a 'msg'
        field explains the reason (not master / unknown pinger / unknown or
        already-finished iteration)
    """

    # Used only to prefix log lines with this endpoint's name.
    current_f_name = inspect.currentframe().f_code.co_name

    logger.info("{}: called".format(current_f_name))

    # Only the master node accepts pinger results.
    if not pipong_is_master():
        logger.info("{}: pipong_is_master:{}".format(current_f_name,
                                                     pipong_is_master()))
        return jsonify({
            'result': 'failure',
            'msg': 'this server is not a master'
        })

    # Expected payload keys: master_remote_id, local_port, result.
    data = request.get_json()

    ip_addr = request.remote_addr
    master_iteration_id = data['master_remote_id']
    pinger_port = data['local_port']
    pinger_result = data['result']

    # The sender must have registered itself (address + port) beforehand.
    registrered_t = db.session.query(models.RegisteredPingerNode).filter_by(
        address=ip_addr, api_port=pinger_port).first()

    if not registrered_t:
        logger.error(
            "{}: Error, the pinger node was not registered {}:{}".format(
                current_f_name, ip_addr, pinger_port))
        return jsonify({
            'result': 'failure',
            'msg': 'the pinger node was not registered'
        })

    # Look up the (iteration, pinger) pairing this result belongs to.
    pinger_iteration_t = db.session.query(
        models.MasterIterationPinger).filter_by(
        master_iteration_id=master_iteration_id,
        registered_pinger_id=registrered_t.id).first()

    if not pinger_iteration_t:
        logger.error("{}: Error, the master pinger iteration was not found. "
                     "Master iter:{} registered pinger:{}".format(
            current_f_name, master_iteration_id,
            registrered_t.id))
        return jsonify({
            'result': 'failure',
            'msg': 'the master pinger iteration was not found'
        })

    # Reject duplicate submissions for an already-finished iteration.
    if pinger_iteration_t.status == "FINISHED":
        logger.error("{}: Error, the pinger iteration was finished. "
                     "Pinger iteration:{} status:{}".format(
            current_f_name, pinger_iteration_t.id,
            pinger_iteration_t.status))
        return jsonify({
            'result':
                'failure',
            'msg':
                ' the master pinger iteration is already finished'
        })

    # Tag each result entry with the reporting pinger's address, then store
    # the result and mark this pinger's part of the iteration as done.
    s = db.session()
    for e in pinger_result:
        e['pinger_address'] = ip_addr

    pinger_iteration_t.result = str(pinger_result)
    pinger_iteration_t.status = "FINISHED"
    s.commit()

    logger.info(
        "{}: Pinger result registrered. Pinger address:{} result: {}".format(
            current_f_name, ip_addr, str(pinger_result)))

    # If every pinger reported in, kick off the async analysis of the
    # whole iteration.
    res = tasks.master_tasks.check_master_iteration_done(master_iteration_id)
    logger.debug(
        "{}: check_master_iteration_done: {}".format(
            current_f_name, res))

    if res['is_finished']:

        # big info message on the logs for easy visualization
        logger.info("{}: ################################".format(current_f_name))
        logger.info("{}: # ITERATION id:{} FINISHED".format(current_f_name, master_iteration_id))
        logger.info("{}: ################################".format(current_f_name))

        # analyse last iteration results
        tasks.master_tasks.analyse_iteration.apply_async(args=[master_iteration_id], kwargs={})

    return jsonify({'result': 'success'})
Example #26
0
def after_request(response):
    """Flask after-request hook: write one access-log line per request.

    Logs a timestamp, the client address, HTTP method, scheme, full path
    and the response status, then returns the response unchanged.
    """
    timestamp = strftime('[%Y-%b-%d %H:%M]')
    # Pass the format string and arguments separately so logging performs
    # lazy %-interpolation. The original wrapped everything in one tuple,
    # which made logger.info print the tuple's repr instead of the
    # formatted message.
    logger.info('%s %s %s %s %s %s', timestamp, request.remote_addr,
                request.method, request.scheme, request.full_path,
                response.status)
    return response
Exemple #27
0
    def load(self, use_small=None):
        """Convert raw relation-mention records into model-ready examples.

        Reads ``self.dataset`` (each record presumably a NYT-style dict with
        'sentText', 'relationMentions' and 'entityMentions' keys — TODO
        confirm against the loader that fills it), locates each
        (entity1, entity2) pair in the sentence text via regex, marks the
        entities with <START>/<END> tags, and builds per-token features.
        Replaces ``self.dataset`` in place with the processed examples.

        Each produced example is
        ``[feature, positions, types, att_label, label, pattern]`` where
        ``feature`` is the word-id sequence, ``att_label`` a normalized
        attention target, ``label`` the relation-label id and ``pattern``
        the text between the two entities.

        :param use_small: if truthy, stop after this many input records
            (used as both the progress-bar total and the cutoff index).
        """
        dataset_ret = []
        pbar = tqdm(total=len(self.dataset) if not use_small else use_small)
        badcase = 0  # count of relations whose entity pair was not found in the text

        for i, data in enumerate(self.dataset):
            if use_small and i == use_small:
                print('use_samll', i)
                break

            sentText = data['sentText']
            sentText = re.sub('\s+', ' ', sentText)  # collapse runs of whitespace into single spaces
            relation = data['relationMentions']
            entitys = data['entityMentions']

            # Build a mention-token -> entity-type lookup table.
            # NOTE(review): each entity text is wrapped in <START>/<END> and
            # split on spaces, so only the first/last tokens carry the tags.
            entity_to_type = {}
            for entity in entitys:
                mentions = '<START>{}<END>'.format(entity['text']).split(' ')
                for mention in mentions:
                    entity_to_type[mention] = entity['label']

            for rel in relation:
                # Optionally restrict to a configured subset of relation labels.
                if self.sel_relation and rel['label'] not in self.sel_relation:
                    continue

                entity1 = rel['em1Text']
                entity2 = rel['em2Text']
                label = conver_token_to_id(rel['label'],
                                           self.config['label_dict'])

                # TODO: search with the earlier "entity1 pattern entity2"
                # approach, then keep the earlier-positioned match.
                # Look for the pair in both orders; group(1) is the full
                # "entityA ... entityB" span, group(2) the text between them.
                ent1_ent2_search = re.search(
                    r'(?: |^)({0} (.*?) {1})(?: |$)'.format(entity1, entity2),
                    sentText)
                ent2_ent1_search = re.search(
                    r'(?: |^)({1} (.*?) {0})(?: |$)'.format(entity1, entity2),
                    sentText)
                if ent1_ent2_search and ent2_ent1_search:
                    # Both orders matched: prefer the earlier occurrence.
                    find = ent1_ent2_search if ent1_ent2_search.start(
                    ) < ent2_ent1_search.start() else ent2_ent1_search
                    flag = 0 if ent1_ent2_search.start(
                    ) < ent2_ent1_search.start() else 1
                elif ent1_ent2_search:
                    find = ent1_ent2_search
                    flag = 0  # flag 0: entity1 appears before entity2
                elif ent2_ent1_search:
                    find = ent2_ent1_search
                    flag = 1  # flag 1: entity2 appears before entity1
                else:
                    badcase += 1
                    continue
                pattern = find.group(2)
                start, end = find.span(1)

                # Tag the leading and trailing entity of the matched span,
                # order depending on which entity came first.
                if flag == 0:
                    short = re.sub(r'^{} '.format(entity1),
                                   '<START>{}<END> '.format(entity1),
                                   find.group(1))
                    short = re.sub(r' {}$'.format(entity2),
                                   ' <START>{}<END>'.format(entity2), short)
                elif flag == 1:
                    short = re.sub(r'^{} '.format(entity2),
                                   '<START>{}<END> '.format(entity2),
                                   find.group(1))
                    short = re.sub(r' {}$'.format(entity1),
                                   ' <START>{}<END>'.format(entity1), short)
                # Splice the tagged span back into the full sentence, then tokenize.
                sentence = sentText[:start] + short + sentText[end:]
                sentence = sentence.split(' ')

                positions = get_positions(sentence, flag,
                                          self.config['pos_max'])
                types = get_types(sentence, entity_to_type,
                                  self.config['type_dict'])

                # Build each token's id and its att_label.
                # NOTE(review): this inner `i` shadows the outer record index
                # `i`; harmless only because the outer value is not used
                # again before the next `enumerate` step.
                feature = []
                att_label = [0.] * len(sentence)  # used for attention regularization
                att_flag = 0  # counts <START>/<END> markers seen so far
                for i, word in enumerate(sentence):
                    if word[:7] == '<START>':
                        word = word[7:]
                        att_label[i] = 1.
                        att_flag += 1
                    if word[-5:] == '<END>':
                        word = word[:-5]
                        att_label[i] = 1.
                        att_flag += 1
                    # Between the first entity's <END> and the second's
                    # <START> (att_flag in {2, 3}): mark the span between
                    # the entities as attention targets too.
                    if att_flag > 1 and att_flag < 4:
                        att_label[i] = 1.
                    wordid = conver_token_to_id(word, self.config['vocab'])
                    feature.append(wordid)
                # Normalize att_label into a distribution over tokens.
                fenmu = sum(att_label)
                att_label = [f / fenmu for f in att_label]

                dataset_ret.append(
                    [feature, positions, types, att_label, label, pattern])
            pbar.update(1)
        pbar.close()
        logger.info('badcase: {}'.format(badcase))
        self.dataset = dataset_ret
        logger.info('load data: {}'.format(len(self.dataset)))
Exemple #28
0
def run(config, immediate=False):
    """Build the backup/heartbeat job list from *config* and run it.

    With ``immediate=True`` every job is executed once, sequentially, and
    the function returns. Otherwise each job is registered with the
    ``schedule`` library at its configured interval and this function
    loops forever servicing pending jobs.

    :param config: raw configuration passed to ``parse_config``.
    :param immediate: run all jobs once and return instead of scheduling.
    """
    jobs = []
    # Heartbeat job keeps the scheduler observably alive (every minute).
    heartbeat = Heartbeat(1)
    jobs.append({"job": heartbeat, "interval": 1, "interval_unit": "minute"})

    base_config, sql_config, redis_config, qiniu_config, mongo_config = parse_config(
        config)
    # qiniu_config.is_s3 selects an S3-compatible uploader instead of Qiniu.
    qiniu_uploader = QiniuUploader(
        qiniu_config) if not qiniu_config.is_s3 else S3Uploader(qiniu_config)
    if sql_config:
        # Renamed loop variable: the original shadowed the `config` parameter.
        for sql_cfg in sql_config:
            sql_backup_job = SqlBackupJob(base_config, sql_cfg, qiniu_uploader)
            jobs.append({
                "job": sql_backup_job,
                "interval": sql_cfg.interval,
                "interval_unit": sql_cfg.interval_unit
            })
    if redis_config:
        redis_backup_job = RedisBackupJob(base_config, redis_config,
                                          qiniu_uploader)
        jobs.append({
            "job": redis_backup_job,
            "interval": redis_config.interval,
            "interval_unit": redis_config.interval_unit
        })
    if mongo_config:
        for mongo_cfg in mongo_config:
            mongo_backup_job = MongoBackupJob(base_config, mongo_cfg,
                                              qiniu_uploader)
            jobs.append({
                "job": mongo_backup_job,
                "interval": mongo_cfg.interval,
                "interval_unit": mongo_cfg.interval_unit
            })

    # One-off mode: run every job exactly once and return. The original
    # had this branch inside the scheduling loop, so it fired on the first
    # iteration anyway — hoisting it makes the control flow explicit.
    if immediate:
        logger.info("Running {} jobs immediately".format(len(jobs)))
        for job in jobs:
            job["job"].run()
            time.sleep(1)
        logger.info("Finished all jobs")
        return

    for job in jobs:
        interval = job["interval"]
        interval_unit = job['interval_unit']
        assert interval > 0
        assert interval_unit in ['second', 'minute', 'hour']
        if interval_unit == 'second':
            schedule.every(interval).seconds.do(job['job'].run)
        elif interval_unit == 'minute':
            schedule.every(interval).minutes.do(job['job'].run)
        elif interval_unit == 'hour':
            schedule.every(interval).hours.do(job['job'].run)
    # Log once after registration; the original logged this per job.
    logger.info("Scheduled all jobs!!!")

    while 1:
        schedule.run_pending()
        time.sleep(1)
Exemple #29
0
def get_device():
    """Select the compute device for training.

    Distributed training is not supported yet; only single-machine,
    single-GPU (or CPU-only) training is done for now.

    Returns a ``(device, n_gpu)`` tuple where ``device`` is ``cuda:0``
    when CUDA is available and ``cpu`` otherwise, and ``n_gpu`` is the
    number of visible CUDA devices.
    """
    cuda_ok = torch.cuda.is_available()
    device = torch.device("cuda:0") if cuda_ok else torch.device("cpu")
    gpu_count = torch.cuda.device_count()
    logger.info("device %s n_gpu %d ", device, gpu_count)
    return device, gpu_count
Exemple #30
0
    def newGame(self, level):
        """Reset all game state and start a fresh game on *level*.

        Shows a loading screen, clears any previous game's sprites and
        scheduled callbacks, rebuilds the grid/pathfinding state, starts
        the per-frame and income/spawn clocks, and builds the in-game UI.

        :param level: map identifier stored on ``self.map`` and used by
            ``generateGridSettings``/``Grid`` — presumably a level name or
            index; confirm against the menu code that calls this.
        """
        logger.info("Starting a new game.")
        self.map = level
        # Display load screen
        self.window.mainmenu = None   # Kills the menu
        self.window.loading = True
        pyglet.gl.glClear(pyglet.gl.GL_COLOR_BUFFER_BIT)
        self.window.render(0)
        self.window.flip()
        # Remove old stuff from game window
        self.window.flushWindow()

        # Generates grid parameters for game instance
        self.tiles_no_build = []
        self.tiles_no_walk = []
        self.flightgrid = []
        self.generateGridSettings()
        self.grid = Grid(self)

        self.window.tile_renderer.update_offset()

        # Create particle emitters
        self.window.addParticleEmitters()

        # Lists of game objects.
        # Clear debuffs/targets first so no stale cross-references survive
        # into the new game (mirrors the teardown in gameOver).
        for m in self.mobs:
            m.debuff_list = []
        self.mobs = []  # Enemy mob sprite objects
        self.mob_count = 0  # This serve as mob's id
        for t in self.towers:
            t.target = None
        self.goal, self.spawn = None, None
        self.towers = []  # Tower sprite objects
        self.window.animations = []
        self.selected_mouse = None
        self.dragging = False
        self.highlighted = []
        self.active_tower = None
        self.mouse_drag_tower = None
        self.pf_queue = []  # pathfinding work queue
        self.gold = 25  # player starting gold
        self.ai_gold = 0
        self.ai_flat_income = 0
        # Best-effort unschedule of any callbacks left over from a previous
        # game before re-scheduling them below.
        # NOTE(review): bare `except` also swallows KeyboardInterrupt; the
        # same pattern is used in gameOver, kept for consistency.
        try:
            pyglet.clock.unschedule(self.aiIncome)
        except:
            pass
        try:
            pyglet.clock.unschedule(self.updateState)
        except:
            pass
        try:
            pyglet.clock.unschedule(self.autospawnBalanced)
        except:
            pass
        # AI income ticks every 10 s; balanced autospawn four times a second.
        pyglet.clock.schedule_interval(
                        self.aiIncome,
                        10.0
                    )
        pyglet.clock.schedule_interval(
                        self.autospawnBalanced,
                        0.25
                    )
        self.lives = 10  # player starting lives
        pyglet.clock.unschedule(self.autospawnRandom)
        # Pathfinding runs at 60 Hz.
        pyglet.clock.schedule_interval(self.pathFinding, 1.0/60.0)

        # Autospawn random mob every second
        self.autospawn = False

        self.grid.update(new="update")
        # Game state updates at 30 Hz.
        pyglet.clock.schedule_interval(self.updateState, 1.0/30.0)

        # Adding buttons to UI
        for b in ("1", "2", "3", "4", "gold_icon"):
            self.window.userinterface.addButton(b)
        self.window.userinterface.addText("gold")
        self.window.loading = False
        self.loaded = True
        self.paused = False
0
 def wrapper(*args, **kwargs):
     """Log start/completion around a job call and pass its result through."""
     # Resolve the job's module once; it is used in both log lines.
     job_module = inspect.getmodule(func)
     logger.info('LOG: Running job "%s"' % job_module)
     outcome = func(*args, **kwargs)
     logger.info('LOG: Job "%s" completed' % job_module)
     return outcome
Exemple #32
0
def change_password(uid, newpwd):
    """Hash *newpwd* and store it as the password of user *uid*.

    :param uid: user id used as the Mongo ``_id`` key.
    :param newpwd: new plaintext password; only its hash is persisted.
    """
    p = hashpwd(newpwd)
    # NOTE(review): Collection.update() is the legacy pymongo API
    # (superseded by update_one) — confirm the driver version in use.
    get_users().update({'_id': uid}, {'$set': {'password': p}})
    logger.info("password for %s changed!" % uid)
def send_messages(profile_id_list: str,
                  looking_for: str = None,
                  photos_only: str = "off",
                  profiles: list = None) -> bool:
    """Send messages from profiles.

    Checks how many messages each profile has already sent today and
    calculates the max available receiver age from the profile's
    max_age_delta and age. For every profile in *profile_id_list*, logs
    in, searches for target profiles page by page and messages them until
    the daily limit is reached or the site stops returning candidates.

    :param profile_id_list: ids of sender profiles to load from the DB.
    :param looking_for: target sex; derived from each sender's sex if None.
    :param photos_only: search filter flag passed through to the site.
    :param profiles: optional pre-fetched search results to use first.
    :returns: True when all profiles have been processed.
    """
    from main import logger
    # Load sender profiles from the DB. Row layout:
    # (profile_id, profile_password, msg_limit, max_age_delta, age, sex)
    profiles_list = db_get_rows(
        [
            Profiles.profile_id, Profiles.profile_password, Profiles.msg_limit,
            Profiles.max_age_delta, ProfileDescription.age,
            ProfileDescription.sex
        ], Profiles.profile_id.in_(profile_id_list), Profiles.profile_password,
        ProfileDescription.profile_id == Profiles.profile_id)
    # BUGFIX: the original looped `range(1, len(profiles_list) + 1)` and
    # indexed `profiles_list[i]`, skipping the first row and raising
    # IndexError on the last one. Iterate the rows directly.
    for row in profiles_list:
        # NOTE(review): this query does not filter by the current sender's
        # profile_id, so it presumably counts today's messages across all
        # profiles — confirm against the schema/intent.
        msg_have_sent_today = db_get_rows(
            [Messages.message_token],
            Profiles.profile_id == ChatSessions.profile_id,
            Messages.chat_id == ChatSessions.chat_id,
            Messages.send_time == datetime.now().date())
        # BUGFIX: db_get_rows returns a list of rows; the original
        # subtracted the list itself from the int limit (TypeError).
        msg_need_to_be_sent = row[2] - len(msg_have_sent_today)

        # calculate max_age for message receivers
        date_of_birth_end = row[3] + row[4]

        profile_login, password = str(row[0]), row[1]
        values = login(profile_login, password)
        # start page in site for search
        page = 1
        # BUGFIX: compute the target sex in a local instead of mutating the
        # `looking_for` argument, which leaked the first sender's derived
        # value into every later iteration.
        target_sex = looking_for
        if not target_sex:
            if row[5] == 'male':
                target_sex = 'female'
            else:
                target_sex = 'male'
        if values:
            session, my_profile_id = values
            my_data = collect_info_from_profile(my_profile_id)
            logger.info(f"Profile with profile_id: {my_profile_id} "
                        f"start send messages")
            messages_has_sent = 0
            stop = False  # set when the site reports the message limit is out
            while messages_has_sent < msg_need_to_be_sent:
                if stop:
                    break
                # NOTE(review): caller-provided `profiles` are reused until
                # emptied and never refreshed per page — confirm intended.
                if not profiles:
                    profiles = search_for_profiles(my_data["Sex"], target_sex,
                                                   my_data["Age"],
                                                   date_of_birth_end, page,
                                                   photos_only)
                profiles_id = get_id_profiles(profiles)
                profile_try_counter = 0
                page_try_counter = 0
                # Retry empty search results: 3 tries per page, 10 pages max.
                while len(profiles_id) == 0:
                    if profile_try_counter == 2:
                        page += 1
                        page_try_counter += 1
                        # BUGFIX: reset the per-page counter so the page
                        # counter keeps advancing; the original bumped it
                        # only once and could loop forever.
                        profile_try_counter = 0
                    elif page_try_counter == 10:
                        logger.info(
                            f"Site don't show any profiles in 10 pages,"
                            f"messages sent from profile with"
                            f"profile_id: {my_profile_id} ended.")
                        messages_has_sent = \
                            msg_need_to_be_sent
                        break
                    profiles = search_for_profiles(my_data["Sex"], target_sex,
                                                   my_data["Age"],
                                                   date_of_birth_end, page,
                                                   photos_only)
                    profiles_id = get_id_profiles(profiles)
                    profile_try_counter += 1
                for profile_id in profiles_id:
                    check_response = check_for_filter(session, profile_id)
                    if check_response:
                        if check_response == "LIMIT OUT":
                            stop = True
                            break
                    else:
                        message_text = create_custom_message(
                            my_profile_id, profile_id)
                        if message_text is False:
                            logger.info(
                                f"Profile with profile_id {my_profile_id},"
                                f" without unused message templates")
                            messages_has_sent = \
                                msg_need_to_be_sent
                            break
                        message(session, profile_id, message_text)
                        dialog_download(observer_login=row[0],
                                        observer_password=row[1],
                                        sender_id=row[0],
                                        receiver_profile_id=profile_id)
                        messages_has_sent += 1
                        logger.info(
                            f"Successfully sent message to profile "
                            f"with profile_id: {profile_id}. "
                            f"Left to send: "
                            f" {msg_need_to_be_sent - messages_has_sent}")
                        if messages_has_sent == \
                                msg_need_to_be_sent:
                            logger.info(f"Profile with profile_id: "
                                        f"{my_profile_id}, "
                                        f"successfully sent messages")
                            break
                page += 1
    return True