Example #1
    def __init__(self):
        """
        A bad situation at startup: ss-libev is already serving some ports, the
        passwords in the database have changed, and ssman starts only afterwards.
        The served ports then keep stale passwords.
        Solution: remove all currently served ports, add back the enabled ones,
        and keep _ports_working around for the later password sync.
        """
        logger.warning('Start Ssman for ss-libev, now initializing...')
        self._ports_working = {}
        self._traffics_lasttime = {}
        self._traffics_to_log = {}
        self._traffic_sync_n = 0
        # fetch serving ports before ssman start
        ports_failed = Manager.remove_ports(Manager.get_working_ports())
        if ports_failed is not None:
            for p in ports_failed:
                self._ports_working.update({p: None})

        ports_enabled = db.get_enabled_ports()
        ports_failed = Manager.add_ports(ports_enabled)

        # and store the passwords for later sync passwords
        self._ports_working.update(ports_enabled)
        if ports_failed is not None:
            for p in ports_failed:
                if p in self._ports_working:
                    del self._ports_working[p]
        logger.info('Initial working ports: %s' % list(self._ports_working))
        logger.warning('Initialization done.')
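A hedged sketch of the later password sync the docstring alludes to; the method name and the exact Manager/db return conventions below are assumptions, not the project's actual code:

    def _sync_passwords(self):
        # hypothetical sync step; db.get_enabled_ports() is assumed to map port -> password
        ports_enabled = db.get_enabled_ports()
        for port, password in ports_enabled.items():
            if self._ports_working.get(port) != password:
                # password changed (or port unknown): re-add the port with the new password
                Manager.remove_ports([port])
                failed = Manager.add_ports({port: password})
                if not failed or port not in failed:
                    self._ports_working[port] = password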
Example #2
File: bddisk.py Project: XGMO9/tbv6
def share(path, period=0, pwd=''):
    def join_cookies(func_cookies):
        c = RequestsCookieJar()
        for cookie in func_cookies:
            c.set(cookie['name'], cookie['value'], path=cookie['path'], domain=cookie['domain'])
        return c

    data = {
        "path_list": f'["{path}"]',
        "period": f"{period}",
        "channel_list": "[]",
        "schannel": "4"
    }
    if pwd:
        data['pwd'] = pwd
    session = requests.Session()
    session.cookies.update(join_cookies(cookies))
    r = session.post(
        'https://pan.baidu.com/share/pset', params=params, data=data,
        headers=baidu_headers)
    try:
        r.raise_for_status()
        return r.json()['link']
    except Exception as e:
        logger.warning(e, exc_info=True)
        bot.send_message(const.GOD, 'There was an error while sharing, better do it yourself~')
Example #3
def get_move_set(pkmn_name):
    try:
        return set(all_move_sets[pkmn_name.lower()])
    except KeyError:
        logger.warning("{} does not have a move lookup".format(
            pkmn_name.lower()))
        return {constants.DO_NOTHING_MOVE}
Example #4
 def __init__(self, normal_accounts=None, contest_accounts=None):
     if not normal_accounts and not contest_accounts:
         logger.warning(
             'Neither normal_accounts nor contest_accounts has an available account, '
             'submitter and crawler will not work')
     self._normal_accounts = normal_accounts or {}
     self._contest_accounts = contest_accounts or {}
Example #5
    def run(self):
        while True:
            videos2d = self.get_videos2download()
            if not videos2d:
                self.sleep(SLEEP_TIME)
            else:
                for video in videos2d:
                    if video.for_test:
                        video_dir = local_videos4test_dir
                    else:
                        video_dir = local_videos_dir

                    local_path = os.path.join(video_dir,
                                              f'{video.vid}.{video.format}')
                    if not os.path.exists(local_path) or int(
                            os.path.getsize(local_path)) < int(video.size):
                        try:
                            self.download(video, local_path=local_path)
                        except Exception as e:
                            logger.warning(f"Download error: {e}\nThe server may be restarting, please wait and retry...")
                    else:
                        try:
                            self.update_status_32(video)
                        except Exception as e:
                            logger.warning(f"Status update error: {e}\nThe server may be restarting, please wait and retry...")
Example #6
def convert(weights: Optional[str]) -> None:
    """
    Model conversion. The selected model is saved in Keras format (*.h5), as a TF 1.x frozen graph (*.pb),
    and as ONNX (*.onnx). The files are written to the folder "WEIGHTS_PATH/_(current date)/".

    :param weights: path to saved keras model weights.
    """
    keras.backend.set_learning_phase(0)
    save_path = os.path.join(WEIGHTS_PATH, get_date()[2:])
    os.makedirs(save_path, exist_ok=True)
    if weights is None:
        logger.warning(
            '\nNo weights provided. Converting random initialized model.\n')
        weights_name = 'random'
    else:
        weights_name = os.path.basename(weights)
        shutil.copyfile(weights, os.path.join(save_path, weights_name))
    model = get_model(weights)
    if weights is None:
        model.save(os.path.join(save_path, weights_name))
    frozen_model_path = freeze_keras_model(model, save_path,
                                           os.path.splitext(weights_name)[0],
                                           list(OUTPUT_NAMES))
    convert_frozen_graph_to_onnx(frozen_model_path, [INPUT_NAME],
                                 OUTPUT_NAMES,
                                 save_path,
                                 opset=None)
Example #7
 def __call__(self, score, model, epoch):
     """
     Call this instance when a new score arrives.
     Args:
         score (float): the new score
         model: the model that produced the score
         epoch (int): the index of the epoch that produced the score
     """
     # first call
     if self.record is None:
         self.record = score
         self.refreshed = True
         self.best_model = model
         self.best_epoch = epoch
     # not hit the best
     elif (not self.high_record and score > self.record + self.delta) or \
             (self.high_record and score < self.record - self.delta):
         self.counter += 1
         self.refreshed = False
         logger.info('EarlyStopping counter: {} out of {}'.format(
             self.counter, self.patience))
         if self.counter >= self.patience:
             self.early_stop = True
             logger.warning('Early stop')
     # hit the best
     else:
         self.record = score
         self.counter = 0
         self.refreshed = True
         self.best_model = model
         self.best_epoch = epoch
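A minimal sketch of how this callback might be driven from a training loop; the constructor arguments below are assumptions, only the __call__ contract (score, model, epoch) and the early_stop/best_model/best_epoch attributes come from the code above:

# hypothetical driver; EarlyStopping's constructor signature is an assumption
def evaluate(model, epoch):
    return 0.90 - 0.01 * epoch  # stand-in for a real validation metric

stopper = EarlyStopping(patience=5, delta=0.0, high_record=True)
model = object()  # placeholder for a real model
for epoch in range(100):
    score = evaluate(model, epoch)
    stopper(score, model, epoch)  # updates record/counter as shown above
    if stopper.early_stop:
        break
best_model, best_epoch = stopper.best_model, stopper.best_epoch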
Example #8
    def loop(self):
        """The main receive/send loop."""

        while self._running:
            try:
                data = self._socket.recv(1024 * 8)
                self._buffer.write(data)

                if len(data) <= 0:
                    if self._running:
                        raise NodeDisconnected("Node disconnected.")
                    else:
                        # Looks like an intentional disconnect, just return
                        return

                # Loop while there's more data after the parsed message. The next message may be complete, in which
                # case we should read it right away instead of waiting for more data.
                while True:
                    data = self.read_message()
                    if data is None:
                        # Incomplete buffer, wait for more data
                        break

                    header, message, more_data = data
                    if hasattr(self, "handle_%s" % header.command):
                        getattr(self, "handle_%s" % header.command)(header, message)
                    if not more_data:
                        break
            except (InvalidChecksum, UnknownCommand) as e:
                logger.warning("Error parsing data packet: %s" % e, exc_info=sys.exc_info())
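The loop relies on a read_message helper that is not shown. Below is a self-contained sketch of one way such framing could work; the 12-byte command plus 4-byte length header used here is an assumption for illustration, not the project's actual wire format:

import io
import struct
from collections import namedtuple

Header = namedtuple("Header", ["command", "length"])

def read_message(buffer: io.BytesIO):
    """Parse one framed message from buffer, or return None if it is incomplete."""
    raw = buffer.getvalue()
    if len(raw) < 16:  # 12-byte command + 4-byte little-endian payload length
        return None
    command = raw[:12].rstrip(b"\x00").decode("ascii")
    (length,) = struct.unpack("<I", raw[12:16])
    if len(raw) < 16 + length:
        return None  # incomplete message, wait for more data
    payload = raw[16:16 + length]
    remainder = raw[16 + length:]
    buffer.seek(0)
    buffer.truncate()
    buffer.write(remainder)  # keep bytes that belong to the next message
    return Header(command, length), payload, len(remainder) > 0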
Example #9
    def update_session(self, sessionid, session):

        query = sql.SQL("""
            UPDATE {}
            SET
                slot = %(slot)s,
                ownerid = %(ownerid)s,
                model = %(model)s,
                customerid = %(customerid)s,
                due = %(due)s
            WHERE id = %(sessionid)s
            RETURNING id
        """).format(sql.Identifier(self.table))

        # merge the row id into the named parameters so the WHERE clause can use it
        params = dict(session, sessionid=sessionid)

        logger.info("Mogrify: {}".format(db.cursor.mogrify(query, params)))

        try:
            db.cursor.execute(query, params)
            db.conn.commit()
            fetch = db.cursor.fetchone()
            logger.debug("FETCH: {}".format(fetch))
            return fetch['id']
        except psycopg2.IntegrityError as e:
            logger.warning(e)
            db.conn.rollback()
            raise
        except psycopg2.ProgrammingError:
            logger.exception("!ERROR")
            db.conn.rollback()
            raise
        except Exception:
            db.conn.rollback()
            raise
Example #10
def parse_binance_deposits(path):
    f = maybe_open(path)
    if f is None: return []
    f = pd.read_excel(path)
    txs = []
    for i, row in f.iterrows():

        dt = dateutil.parser.parse(row['Date'])
        if dt.year > target_year:
            continue
        currency = row['Coin'].replace('BCHABC',
                                       'BCH').replace('BCC', 'BCH').replace(
                                           'BCHSV', 'BSV')
        try:
            price = prices.get_price(currency, dt)
        except Exception as e:
            logger.warning('Could not get price for row {}: {}'.format(row, e))
            continue
        amount = row['Amount']
        dollar = price * amount

        txs.append({
            'dollar': dollar,
            'direction': 'in',
            'price': price,
            'amount': amount,
            'currency': currency,
            'timestamp': dt.timestamp(),
            'notes': 'binance deposit'
        })
    return txs
Example #11
def main():
    global running
    try:
        page, cid, uid = page_queue.get(timeout=1800)
    except Empty:
        page, cid, uid = 1, const.GOD, const.GOD
    running = True
    try:
        for post_info, magnets in ll.main(page):
            bot.send_message(const.GOD,
                             f'Found new post [{post_info.title}]({post_info.url})',
                             parse_mode='markdown',
                             disable_notification=True)
            handler = MessageHandler(post_info, magnets)
            for magnet in magnets:
                with MessagesDB('main.db') as d:
                    res = d.search_uploaded(magnet)
                if res:
                    handler(res[0][3])
                else:
                    qbitmsg.qm.add(magnet, funcs=[handler])
    except Exception as e:
        logger.warning(e)
    finally:
        running = False
        logger.info("Finished searching for new posts")
Example #12
    def find_best_switch(self):
        #build tree
        switchRoot = Tree()
        # find worst case move used on each possible switched in Pokemon
        battle_copy = deepcopy(self)
        battle_copy.opponent.lock_moves()
        try:
            pokemon_sets = get_pokemon_sets(battle_copy.opponent.active.name)
        except KeyError:
            logger.warning("No set for {}".format(battle_copy.opponent.active.name))
            return
        opponent_possible_moves = sorted(pokemon_sets[MOVES_STRING], key=lambda x: x[1], reverse=True)

        for reservePkm in self.user.reserve:
            if reservePkm.hp == 0:
                continue
            worstCase = 0
            for move in opponent_possible_moves:
                if move[0].startswith("hiddenpower"):
                    continue
                selfCopy = deepcopy(self)
                selfCopy.user.active = reservePkm
                state = selfCopy.create_state()
                damageEstimate = _calculate_damage(state.opponent.active,state.self.active,move[0])
                if damageEstimate is not None:
                    if damageEstimate[0] > worstCase:
                        worstCase = damageEstimate[0]
            switchNode = Tree()
            switchNode.data = "switch " + reservePkm.name
            switchNode.maximinScore = worstCase*-0.667
            switchRoot.children.append(switchNode)

        # traverse Tree with root switchRoot
        return treeTraversalDFS(switchRoot)
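The Tree and treeTraversalDFS helpers used above are not shown. A minimal sketch consistent with how they are used (data, maximinScore, children, and a traversal that picks the child with the best score); the real project's definitions may differ:

class Tree:
    # minimal node type inferred from the attribute accesses above
    def __init__(self):
        self.data = None
        self.maximinScore = float("-inf")
        self.children = []


def treeTraversalDFS(root):
    # return the option whose worst-case (maximin) score is best
    best = max(root.children, key=lambda node: node.maximinScore, default=None)
    return best.data if best is not None else None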
Example #13
    def put_taskmng_in_queue(self, task):
        query = sql.SQL("""
            INSERT INTO taskmng_queue
            (sessionuid, sessiontaskid, taskname, action, scheduled_on)
            VALUES( %s, %s, %s, %s, %s)
            RETURNING sessiontaskid
        """)

        params = (task['sessionuid'], task['session_taskid'],
                  task['session_task_name'], task['action'],
                  datetime.datetime.utcnow())

        try:
            db.cursor.execute(query, params)
            db.conn.commit()
            fetch = db.cursor.fetchone()
            logger.debug("FETCH: {}".format(fetch))
            return fetch['sessiontaskid']
        except psycopg2.IntegrityError as e:
            logger.warning(e)
            db.conn.rollback()
        except psycopg2.ProgrammingError:
            logger.exception("!ERROR")
            db.conn.rollback()
        except Exception:
            db.conn.rollback()
            raise
Example #15
    def dump_src(self, src, force=False, skip_manual=False, schedule=False, check_only=False, **kwargs):
        if src in self.register:
            klasses = self.register[src]
        else:
            raise DumperException("Can't find '%s' in registered sources (whether as main or sub-source)" % src)

        jobs = []
        try:
            for i,klass in enumerate(klasses):
                if issubclass(klass,ManualDumper) and skip_manual:
                    logging.warning("Skip %s, it's a manual dumper" % klass)
                    continue
                crontab = None
                if schedule:
                    if klass.SCHEDULE:
                        crontab = klass.SCHEDULE
                    else:
                        raise DumperException("Missing scheduling information")
                job = self.job_manager.submit(
                    partial(self.create_and_dump, klass, force=force,
                            job_manager=self.job_manager, check_only=check_only, **kwargs),
                    schedule=crontab)
                jobs.append(job)
            return jobs
        except Exception as e:
            logging.error("Error while dumping '%s': %s" % (src,e))
            raise
Example #16
    def get_playlist(self, name):
        user_playlists = self._get(PLAYLISTS_URL)
        playlist = None

        # TODO: Handle pagination of results
        for user_playlist in user_playlists['items']:
            if user_playlist['name'] == name:
                playlist = user_playlist
                break

        if not playlist:
            logger.warning(
                "Unable to find playlist '{}'. Is it public?".format(name))
            return None

        # Exchange the minified playlist for a full playlist
        playlist = self._get(playlist['href'])

        # Once we've found the playlist, get all its tracks
        next_results_url = playlist['tracks']['next']

        # Spotify paginates long results
        while next_results_url:
            paginated_results = self._get(next_results_url)
            next_results_url = paginated_results['next']

            playlist['tracks']['items'] += paginated_results['items']

        return Playlist.from_spotify(playlist)
Example #17
    def run(self) -> None:
        # logger.warning('This is a development server. Do not use it in a production deployment.')
        try:
            for task_name in self.deploy_cluster:
                try:
                    schedule.every(self.crontab['action']).minutes.do(self.push_task, task_name=task_name)
                    if ENABLE_DDT:
                        schedule.every(self.crontab['refresh']).minutes.do(self.rc.refresh,
                                                                           key_name=REDIS_SECRET_KEY.format(task_name))
                        logger.success(f"START DDT -- {task_name}")
                    else:
                        logger.warning(f'Not Authorized -- DDT({task_name})')
                    logger.success(f"START TASK -- {task_name}/crontab:{self.crontab['action']} minutes")

                except schedule.IntervalError:
                    logger.error('interval set error')

                self.crontab['action'] += 5

            while True:
                schedule.run_pending()
                time.sleep(1)

        except Exception as err:
            logger.exception('Exception occurred ||{}'.format(err))
            noticer.send_email(text_body='{}'.format(err), to='self')
        except KeyboardInterrupt as err:
            logger.stop('Forced stop ||{}'.format(err))
Example #18
 def retry_request(self, url):
     try:
         # enable proxy
         # proxy = self.get_proxy()
         # use a custom user-agent
         headers = self.get_user_agent()
         # if proxy:
         #     proxies = {
         #         'http': 'http://' + proxy,
         #         'https': 'https://' + proxy
         #     }
         #     # a timeout raises an error and triggers a retry
         #     response = requests.get(url, headers=headers, proxies=proxies, timeout=3)
         #     logger.info("use proxy <{}> success!".format(proxies))
         # else:
         response = requests.get(url, headers=headers, timeout=3)
         # a non-200 status code also raises an error and triggers a retry
         assert response.status_code == 200
         # get the response body as a JSON string
         json_str = response.content.decode()
         # parse the JSON string into a Python object
         dic = json.loads(json_str)
         self.count = 1
         return dic
     except Exception:
         print("<{}> retry {} times".format(url, self.count))
         logger.warning("<{}> retry {} times".format(url, self.count))
         logger.error("<{}> get response failed.".format(url))
         self.count += 1
     raise Exception("<{}> get response failed.".format(url))
Example #19
def start_http_server(agent_input, table_input):
    """Start the HTTP agent server.

    Arguments:
        agent_input {Agent} -- netsnmpagent.netsnmpAgent
        table_input {dict} -- netsnmpagent.netsnmpAgent.table
    """
    global AGENT, MIB_TABLE, NAMED_CONFIGURATION
    AGENT = agent_input
    MIB_TABLE = table_input

    logger.info("Start http agent server")
    httpd = None
    try:
        NAMED_CONFIGURATION = named.NamedConfiguration()
        NAMED_CONFIGURATION.load_configuration()
        if NAMED_CONFIGURATION.file_excution.contents is None:
            logger.warning("start_http_server: {}".format(str(c_except.NamedNotExist())))
        AgentServer.set_default_stats()
        MIB_TABLE[TableOidStr.STAT_PER_CLIENT]["table_value"] = MIB_TABLE[TableOidStr.STAT_PER_CLIENT]["table"].value()
        MIB_TABLE[TableOidStr.STAT_PER_SERVER]["table_value"] = MIB_TABLE[TableOidStr.STAT_PER_SERVER]["table"].value()
        MIB_TABLE[TableOidStr.STAT_PER_VIEW]["table_value"] = MIB_TABLE[TableOidStr.STAT_PER_VIEW]["table"].value()
        httpd = HTTPAgnetServer(
            (HTTP_CONFIGURATION['host'], HTTP_CONFIGURATION['port']), AgentServer)
        httpd.allow_reuse_address = True
        httpd.serve_forever()
    except Exception as ex:
        logger.error("start_http_server error: {}".format(ex))
        logger.error(traceback.format_exc())
        logger.info("Shutdown http agent server")
        if httpd:
            httpd.server_close()
Example #20
 def add_move(self, move_name: str):
     if normalize_name(move_name) in [m.name for m in self.moves]:
         return
     try:
         self.moves.append(Move(move_name))
     except KeyError:
         logger.warning("{} is not a known move".format(move_name))
Example #21
 def compute_hiperspaces(self):
     # The heuristic used when searching for connections between
     # different clusters means this can fail, so we redirect the
     # output to stay silent in those cases
     if not len(self.points) > 0:
         logger.error('No points to compute hull!')
         raise Exception('No points to compute hull!')
     stderr_fd = sys.stderr.fileno()
     with open('/tmp/qhull-output.log', 'w') as f, stderr_redirected(f):
         points = list(self.points)
         logger.info('Searching for hull in dimension %s based on %s points',
                 len(points[0]),len(points))
         output = qconvex('n',points)
         if len(output) == 1:
             logger.debug('Could not get Hull. Joggle input?')
     try:
         dim, facets_nbr, facets = self.__parse_hs_output(output)
     except IncorrectOutput:
         logger.warning('Could not get hull')
         raise CannotGetHull()
     logger.info('Found hull in dimension %s of %s facets',
             dim,len(facets))
     self.dim = dim
     self.facets = facets
     if self.verbose:
         print("Computed MCH with", facets_nbr, "halfspaces")
         print('Here they are:\n')
         for facet in self.facets:
             print(facet)
     return self.dim
Example #22
    def rebuild():
        rebuild_events_queue.empty()
        periodic_items_queue.empty()
        schedule.clear('ads')

        primary_playlist.build()
        special_playlist.build()

        # Choose current playlist
        if special_playlist.is_empty():
            selected_playlist = primary_playlist
            adverts_playlist.build()

            if primary_playlist.is_empty():
                # Only run ads if there's other content
                if not adverts_playlist.is_empty():
                    logger.warning(
                        'Ads will run only when there is other content.')
            else:
                for item in adverts_playlist.get_items():
                    logger.info((
                        'Scheduling {0.path} to run every {0.source.play_every_minutes} minute(s).'
                    ).format(item))

                    def enqueue(item=item):
                        return periodic_items_queue.put_nowait(item)

                    schedule.every(item.source.play_every_minutes).minutes.do(
                        enqueue).tag('ads')
        else:
            logger.info('Playing %s playlist instead of everything else.' %
                        special_playlist.name)
            selected_playlist = special_playlist

        rebuild_events_queue.put_nowait(selected_playlist)
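The enqueue(item=item) default argument above is what freezes the current loop item for each scheduled job; without it, every closure would see the last item of the loop. A short, self-contained illustration of that behaviour:

# closures capture variables, not values: all three jobs see the final `item`
jobs_wrong = [lambda: item for item in ("a", "b", "c")]
print([job() for job in jobs_wrong])   # ['c', 'c', 'c']

# binding the current value as a default argument freezes it per iteration
jobs_right = [lambda item=item: item for item in ("a", "b", "c")]
print([job() for job in jobs_right])   # ['a', 'b', 'c']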
Example #23
def move(battle, split_msg):
    move_name = normalize_name(split_msg[3].strip().lower())

    if is_opponent(battle, split_msg):
        side = battle.opponent
        pkmn = battle.opponent.active
    else:
        side = battle.user
        pkmn = battle.user.active

    # add the move to its moves if it hasn't been seen
    # decrement the PP by one
    # if the move is unknown, do nothing
    move_object = pkmn.get_move(move_name)
    if move_object is None:
        new_move = pkmn.add_move(move_name)
        if new_move is not None:
            new_move.current_pp -= 1
    else:
        move_object.current_pp -= 1
        logger.debug("{} already has the move {}. Decrementing the PP by 1".format(pkmn.name, move_name))

    # if this pokemon used two different moves without switching,
    # set a flag to signify that it cannot have a choice item
    if (
            is_opponent(battle, split_msg) and
            side.last_used_move.pokemon_name == side.active.name and
            side.last_used_move.move != move_name
    ):
        logger.debug("{} used two different moves - it cannot have a choice item".format(pkmn.name))
        pkmn.can_have_choice_item = False
        if pkmn.item in constants.CHOICE_ITEMS:
            logger.warning("{} has a choice item, but used two different moves - setting its item to UNKNOWN".format(pkmn.name))
            pkmn.item = constants.UNKNOWN_ITEM

    try:
        category = all_move_json[move_name][constants.CATEGORY]
        logger.debug("Setting {}'s last used move: {}".format(pkmn.name, move_name))
        side.last_used_move = LastUsedMove(
            pokemon_name=pkmn.name,
            move=move_name
        )
    except KeyError:
        category = None
        side.last_used_move = LastUsedMove(
            pokemon_name=pkmn.name,
            move=constants.DO_NOTHING_MOVE
        )

    # if this pokemon used a damaging move, eliminate the possibility of it having a lifeorb
    # the lifeorb will reveal itself if it has it
    if category in constants.DAMAGING_CATEGORIES and not any([normalize_name(a) in ['sheerforce', 'magicguard'] for a in pokedex[pkmn.name][constants.ABILITIES].values()]):
        logger.debug("{} used a damaging move - not guessing lifeorb anymore".format(pkmn.name))
        pkmn.can_have_life_orb = False

    # there is nothing special in the protocol for "wish" - it must be extracted here
    if move_name == constants.WISH and 'still' not in split_msg[4]:
        logger.debug("{} used wish - expecting {} health of recovery next turn".format(side.active.name, side.active.max_hp/2))
        side.wish = (2, side.active.max_hp/2)
Example #24
 def add_move(self, move_name: str):
     try:
         new_move = Move(move_name)
         self.moves.append(new_move)
         return new_move
     except KeyError:
         logger.warning("{} is not a known move".format(move_name))
         return None
Example #25
def get_most_likely_spread(pkmn_name):
    try:
        sets = get_pokemon_sets(pkmn_name)
    except KeyError:
        logger.warning("{} not in the sets lookup".format(pkmn_name))
        return 'serious', "85,85,85,85,85,85", 0

    return sets[SPREADS_STRING][0]
Example #26
def get_most_likely_ability(pkmn_name):
    try:
        sets = get_pokemon_sets(pkmn_name)
    except KeyError:
        logger.warning("{} not in the sets lookup, using random battle abilities".format(pkmn_name))
        return get_most_likely_ability_for_random_battle(pkmn_name)

    return sets[ABILITY_STRING][0][0]
Example #27
async def on_shutdown(app):
    logger.warning('Shutting down..')
    # insert code here to run it before shutdown

    # Remove webhook (not acceptable in some cases)
    await bot.delete_webhook()

    logger.warning('Bye!')
Example #28
def main():
    try:
        asyncio.run(main_loop())
    except KeyboardInterrupt as ex:
        print()
        logger.warning('Caught Ctrl-C, exiting gracefully.')
        asyncio.run(shutdown())
        sys.exit(0)
Example #29
 def import_status(self):
     status_fname = os.path.join(self.FLAGS.log_path, 'data_status.txt')
     try:
         with open(status_fname, 'r') as f:
             self.status = edict(json.load(f))
             self.prgbar.Restore(self.status.start_idx)
     except Exception:
         logger.warning('No status file found, will create a new one')
Example #30
def get_most_likely_spread_for_standard_battle(pkmn_name):
    try:
        sets = _get_standard_battle_set(pkmn_name)
    except KeyError:
        logger.warning("{} not in the sets lookup".format(pkmn_name))
        return 'serious', "85,85,85,85,85,85"

    return sets[spreads_string][0]
Example #31
def push_2_que(que, ctx):
    """Push data to the given queue."""
    try:
        redis_cli = redis_conn()
        if redis_cli is not None:
            redis_cli.lpush(que, ctx)
    except Exception as e:
        logger.warning('Error pushing data to Redis:\t{0}'.format(e))
Example #32
def parse_lxml(html):
    """Parse HTML."""
    selector = None
    try:
        selector = etree.HTML(html)
    except Exception:
        logger.warning('Error while parsing HTML with lxml')
    return selector
Example #33
 def smt_simplify(self, sol):
     facets = set()
     if sol:
         for p_id, place in enumerate(self.facets):
             normal = []
             ti = sol[Int("b%s"%(p_id))].as_long()
             for t_id, val in enumerate(place.normal):
                 smt_coeff = Int("a%s,%s" % (p_id,t_id))
                 normal.append(sol[smt_coeff].as_long())
             if sum(abs(x) for x in normal) != 0:
                 facets.add(Halfspace(normal, ti))
             else:
                 logger.warning('Made all coefficients zero...weird!')
         self.facets = list(facets)
Example #34
def send_email(msg, recipients):
    flag = True
    while flag:
        if config.fail_conf().report == True:
            if recipients == "admins":
                if config.email_conf().admin_recipients == ['']:
                    try:
                        raise LookupError("No administrator emails configured, sending mail failed.")
                    except:
                        log.exception("No administrator emails configured, sending mail failed.")
                        flag = False
                        break
                message = MIMEText(msg.greeter_admin + msg.body)
                message['To'] = ', '.join(config.email_conf().admin_recipients)
            elif recipients == "users":
                if config.email_conf().user_recipients == ['']:
                    try:
                        raise LookupError("No user emails configured, sending mail failed.")
                    except:
                        log.exception("No user emails configured, sending mail failed.")
                        flag = False
                        break
                message = MIMEText(msg.greeter_user + msg.body)
                message['To'] = ', '.join(config.email_conf().user_recipients)
            message['From'] = config.email_conf().sender
            message['Subject'] = msg.subject
            if config.email_conf().smtp_conn in ('ssl', 'tls'):
                if config.email_conf().smtp_user is None or config.email_conf().smtp_pass is None:
                    log.warning("SMTP connection type is configured as %s, but a username and password haven't been configured. This method requires login credentials. Trying a plain text connection instead." % config.email_conf().smtp_conn.upper())
                    config.email_conf().smtp_conn = None
            try:
                if config.email_conf().smtp_conn == 'ssl':
                    send = smtplib.SMTP_SSL(config.email_conf().smtp_server, config.email_conf().smtp_port)
                else:
                    send = smtplib.SMTP(config.email_conf().smtp_server, config.email_conf().smtp_port)
                if config.email_conf().smtp_conn == 'tls':
                    send.ehlo()
                    send.starttls()
                    send.ehlo()
                    try:
                        send.login(config.email_conf().smtp_user, config.email_conf().smtp_pass)
                    except Exception:
                        pass
                send.sendmail(config.email_conf().sender, message['To'], message.as_string())
                send.close()
            except Exception:
                log.exception("SMTPLIB failed to send email, please check your connection and configuration")
            flag = False
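send_email reads all of its settings through config.email_conf() and config.fail_conf(); the attribute accesses above imply roughly the following shape. This is only an inference for illustration, not the project's actual config module:

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class EmailConf:
    sender: str = "noreply@example.com"          # hypothetical defaults
    admin_recipients: List[str] = field(default_factory=lambda: [''])
    user_recipients: List[str] = field(default_factory=lambda: [''])
    smtp_server: str = "localhost"
    smtp_port: int = 25
    smtp_conn: Optional[str] = None              # None, 'ssl' or 'tls'
    smtp_user: Optional[str] = None
    smtp_pass: Optional[str] = None

@dataclass
class FailConf:
    report: bool = True                          # master switch for failure reports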
Example #35
    def load(self,basedir,pkg,generate_allways=False):
        """
        Loads or generates pkginfo for the Java package pkg, whose sources
        are located under basedir.

        pkg can also be a path to the directory containing the package sources.
        """

        dirname = self.pkg2dir(basedir,pkg)
        fn = os.path.join(dirname,PKGDATA)

        if not generate_allways and os.path.isfile(fn):
            try:
                with open(fn) as f:
                    p = yaml.load(f, Loader=yaml.Loader)
                version = getattr(p, 'pkginfo_version', None)
                # regenerate if the version mismatches the stored info
                if version == VERSION:
                    return p
                logger.debug("pkginfo: version mismatch %s,%s", VERSION, version)
                return self.create(basedir, pkg)
            except Exception as e:
                logger.warning("exception during pkginfo load: %s", e)