Example #1
def resolve_positions(symbol):
	logger.info('Resolve positions')

	# Cancel open orders.
	logger.info('Cancel all orders')
	try:
		bitmex_client.Order.Order_cancelAll(symbol=symbol).result()
	except Exception:
		logger.error(traceback2.format_exc())
		return

	# Close open positions.
	logger.info('Query positions to close')
	try:
		positions = bitmex_client.Position.Position_get(filter=json.dumps({"symbol": symbol})).result()
	except Exception:
		logger.error(traceback2.format_exc())
		return

	for pos in positions[0]:
		# Skip over flat positions.
		if pos['currentQty'] == 0: continue

		# Unwind position by multiplying currentQty by -1.
		symbol = pos['symbol']
		order_qty = -1 * pos['currentQty']
		place_order(symbol, order_qty)
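As an aside, the BitMEX REST API also documents an execInst value of 'Close' on Order_new, which submits a market order sized to flatten the current position, so the manual quantity negation above can be avoided. A minimal sketch under that assumption, reusing the bitmex_client, logger and traceback2 objects from the example:

def close_position_market(symbol):
    # Hedged sketch: relies on BitMEX's documented execInst='Close'; omitting orderQty
    # lets the exchange size the market order to the open position.
    try:
        bitmex_client.Order.Order_new(symbol=symbol, ordType='Market', execInst='Close').result()
    except Exception:
        logger.error(traceback2.format_exc())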
Example #2
def place_order(symbol, quantity, decision_px, position=None):
    logger.info(f'Place order: symbol:{symbol} quantity:{quantity}')

    # DO NOT use API method Order.Order_newBulk() to post both the new order and the new stop-limit order.
    # Oddly, the API for bulk orders fails when the ordType differs between orders.

    # Support up to 5 retries in case Bitmex rejects our order due to load-shedding policy.
    attempts, new_order = 0, None
    while attempts < 5 and not new_order:
        logger.info(f'Attempt to place order on attempt:{attempts}')
        try:
            new_order = bitmex_client.Order.Order_new(
                symbol=symbol, orderQty=quantity, ordType='Market').result()
            break
        except bravado.exception.HTTPServiceUnavailable:
            # XXX we should check the error message to be sure the "overloaded" condition happened.
            logger.error(traceback2.format_exc())
        except Exception:
            logger.error(traceback2.format_exc())
            break
        # Bitmex has rejected order due to load-shedding policy. Per docs, wait 500 ms, then retry. Note: this is a blocking sleep().
        sleep(0.5)
        attempts += 1
    if not new_order:
        failure_message = 'from overload condition' if attempts >= 1 else None
        broadcast_order_failure(symbol,
                                quantity,
                                decision_px,
                                position=position,
                                failure_message=failure_message)
        return StatusCode.ERROR

    # Persist new order to data store.
    create_timestamp = new_order[0]['timestamp'].timestamp()
    insert_order(new_order[0]['orderID'], new_order[0]['symbol'],
                 new_order[0]['price'], new_order[0]['side'],
                 new_order[0]['orderQty'], 'Market', create_timestamp,
                 decision_px)

    # Register realized P&L only when a position is closed.
    realized_pnl = None
    if position:
        realized_pnl = (1.0 / position['avgEntryPrice'] -
                        1.0 / new_order[0]['avgPx']) * abs(quantity)
        realized_pnl *= (1.0 - position['commission'])

    # Message interested parties about trade. Include trade-price if order filled.
    broadcast_message(symbol,
                      quantity,
                      new_order[0]['avgPx'],
                      decision_px=decision_px,
                      realized_pnl=realized_pnl,
                      ord_status=new_order[0]['ordStatus'])

    # Initiate stop order if we are not closing a position.
    if not position:
        entry_px = float(new_order[0]['avgPx'])
        place_stop_order(symbol, -quantity, entry_px, decision_px)

    return StatusCode.OK
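The retry-on-overload loop above can be factored into a small helper so the 500 ms back-off is reusable across order types. A minimal sketch, assuming the same bravado-based bitmex_client, logger and sleep imports as the example; the helper name and signature are illustrative only:

def call_with_overload_retry(api_call, max_attempts=5, backoff_secs=0.5):
    # Hedged sketch: retry a callable that raises HTTPServiceUnavailable when
    # BitMEX sheds load; any other exception propagates to the caller.
    for attempt in range(max_attempts):
        try:
            return api_call()
        except bravado.exception.HTTPServiceUnavailable:
            logger.error(traceback2.format_exc())
            sleep(backoff_secs)  # blocking sleep, per the 500 ms wait suggested by the BitMEX docs
    return None

# Illustrative usage:
# new_order = call_with_overload_retry(
#     lambda: bitmex_client.Order.Order_new(symbol=symbol, orderQty=quantity, ordType='Market').result())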
Example #3
def insert_fill_conditionally(trade):
	logger.info(f"Determine if trade already exists: execID:{trade['execID']}")
	already_exists = False
	try:
		with connection.cursor() as cursor:
			sql = "SELECT exec_id FROM fills WHERE exec_id = %s"
			cursor.execute(sql, (trade['execID'],))
			exec_id = cursor.fetchone()
			already_exists = exec_id is not None
	except Exception:
		logger.error(traceback2.format_exc())
		return

	if already_exists: return

	logger.info(f"Inserting fill: execID:{trade['execID']} orderID:{trade['orderID']} symbol:{trade['symbol']} side:{trade['side']} price:{trade['price']} orderQty:{trade['orderQty']} ordType:{trade['ordType']} ordStatus:{trade['ordStatus']} lastPx:{trade['lastPx']} lastQty:{trade['lastQty']} leavesQty:{trade['leavesQty']} cumQty:{trade['cumQty']} avgPx:{trade['avgPx']}")
	tx_time = trade['transactTime'].timestamp()
	try:
		with connection.cursor() as cursor:
			sql = "INSERT INTO `fills` (`transaction_dt`, `exec_id`, `order_id`, `symbol`, `side`, `price`, `order_qty`, `order_type`, `order_status`, `last_px`, `last_qty`, `leaves_qty`, `cum_qty`, `avg_px`, `created_dt`) VALUES (FROM_UNIXTIME(%s), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW())"
			cursor.execute(sql, (tx_time, trade['execID'], trade['orderID'], trade['symbol'], trade['side'], trade['price'], trade['orderQty'], trade['ordType'], trade['ordStatus'], trade['lastPx'], trade['lastQty'], trade['leavesQty'], trade['cumQty'], trade['avgPx']))
			connection.commit()
	except Exception:
		logger.error(traceback2.format_exc())
		return
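If the fills table has (or can be given) a UNIQUE index on exec_id, the SELECT-then-INSERT check above collapses into one idempotent statement and the race between the two queries disappears. A minimal sketch using MySQL's INSERT IGNORE; the unique index is an assumption, and the column names follow the schema used above:

def insert_fill_idempotent(trade):
    # Hedged sketch: assumes a UNIQUE KEY on fills.exec_id so duplicate executions are skipped silently.
    tx_time = trade['transactTime'].timestamp()
    try:
        with connection.cursor() as cursor:
            sql = "INSERT IGNORE INTO `fills` (`transaction_dt`, `exec_id`, `order_id`, `symbol`, `side`, `price`, `order_qty`, `order_type`, `order_status`, `last_px`, `last_qty`, `leaves_qty`, `cum_qty`, `avg_px`, `created_dt`) VALUES (FROM_UNIXTIME(%s), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW())"
            cursor.execute(sql, (tx_time, trade['execID'], trade['orderID'], trade['symbol'], trade['side'], trade['price'], trade['orderQty'], trade['ordType'], trade['ordStatus'], trade['lastPx'], trade['lastQty'], trade['leavesQty'], trade['cumQty'], trade['avgPx']))
            connection.commit()
    except Exception:
        logger.error(traceback2.format_exc())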
Example #4
def reconcile_fills_and_positions():
	logger.info('Reconciling fills and positions')

	now = datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()
	start_time = now - 60*60 # one hour ago
	start_time_dt = datetime.fromtimestamp(start_time, tz=timezone.utc)
	start_time_str = start_time_dt.strftime("%Y-%m-%d %H:%M")

	# Look back 1 hour. The pitfall here is a network outage lasting longer than that.
	# The count of 64 for results should capture all trades, most of which usually will have
	# already been persisted to our backend.

	logger.info('Query trade history')
	try:
		trade_history = bitmex_client.Execution.Execution_getTradeHistory(symbol='XBTUSD', count=64, reverse=True, filter=json.dumps({"execType": "Trade", "startTime": start_time_str})).result()
	except Exception:
		logger.error(traceback2.format_exc())
		return

	for trade in trade_history[0]:
		insert_fill_conditionally(trade)

	logger.info('Query positions to persist')
	try:
		positions = bitmex_client.Position.Position_get(filter=json.dumps({'symbol': 'XBTUSD'})).result()
	except Exception:
		logger.error(traceback2.format_exc())
		return
		
	for pos in positions[0]:
		insert_position(pos)
Example #5
def reconcile_fills_and_positions():
    logger.info('Reconciling fills and positions')

    # Scan trade history up to 1 minute ago, as we should call this method each minute.
    now = datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()
    start_time = now - 60
    start_time_dt = datetime.fromtimestamp(start_time, tz=timezone.utc)
    start_time_str = start_time_dt.strftime("%Y-%m-%d %H:%M")

    # Look back 1 minute. The pitfalls here are (a) that there is network outage longer than that,
    # or (b) that a trade executes during the small window between 1 minute ago and when the
    # trade-history query was last done (which might well be more than 1 minute ago).
    # The count of 64 for results is arbitrary and conceivably could miss some fills.

    logger.info('Query trade history')
    try:
        trade_history = bitmex_client.Execution.Execution_getTradeHistory(
            symbol='XBTUSD',
            count=64,
            reverse=True,
            filter=json.dumps({
                "execType": "Trade",
                "startTime": start_time_str
            })).result()
    except Exception:
        logger.error(traceback2.format_exc())
        return

    for trade in trade_history[0]:
        insert_fill_conditionally(trade)
        # Broadcast status of Stop orders if (i) order was completely filled or (ii) order was cancelled.
        case1 = trade['ordStatus'] == 'Filled' and trade['cumQty'] == trade['orderQty']
        case2 = trade['ordStatus'] == 'Canceled'
        if trade['ordType'] == 'Stop' and (case1 or case2):
            # Use negative quantity for sell-side trades.
            quantity = trade['cumQty'] if trade['side'] == 'Buy' else -trade['cumQty']
            broadcast_message(trade['symbol'],
                              quantity,
                              trade['avgPx'],
                              stop_px=trade['stopPx'],
                              ord_status=trade['ordStatus'])

    logger.info('Query positions to persist')
    try:
        positions = bitmex_client.Position.Position_get(
            filter=json.dumps({'symbol': 'XBTUSD'})).result()
    except Exception:
        logger.error(traceback2.format_exc())
        return

    for pos in positions[0]:
        insert_position(pos)
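One way to avoid the look-back pitfalls noted in the comment is to persist a reconciliation watermark and resume from it instead of from a fixed one-minute window. A minimal sketch under that assumption; load_watermark and save_watermark are hypothetical helpers backed by the data store:

def reconciliation_start_time(default_lookback_secs=60):
    # Hedged sketch: resume from the last successfully reconciled Unix timestamp,
    # falling back to the fixed look-back when no watermark has been stored yet.
    now = datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()
    watermark = load_watermark()  # hypothetical helper; returns a Unix timestamp or None
    start_time = watermark if watermark is not None else now - default_lookback_secs
    return datetime.fromtimestamp(start_time, tz=timezone.utc).strftime("%Y-%m-%d %H:%M")

# After a successful pass over the trade history, persist the reconciliation time, e.g.:
# save_watermark(datetime.utcnow().replace(tzinfo=timezone.utc).timestamp())  # hypothetical helper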
Example #6
def check_proof(item, *, rewrite):
    if item.steps:
        context.set_context(None, vars=item.vars)
        state = server.parse_init_state(item.prop)
        history = state.parse_steps(item.steps)
        if rewrite:
            with global_setting(unicode=True):
                item.proof = state.export_proof()

        for step in history:
            if 'error' in step:
                return {
                    'status': 'Failed',
                    'err_type': step['error']['err_type'],
                    'err_str': step['error']['err_str'],
                    'trace': step['error']['trace']
                }

        try:
            state.check_proof()
        except Exception as e:
            return {
                'status': 'Failed',
                'err_type': e.__class__.__name__,
                'err_str': str(e),
                'trace': traceback2.format_exc()
            }

        # Otherwise OK
        return {
            'status': 'OK' if len(state.rpt.gaps) == 0 else 'Partial',
            'num_steps': len(item.steps),
        }
    elif item.proof:
        try:
            context.set_context(None, vars=item.vars)
            state = server.parse_proof(item.proof)
            state.check_proof(no_gaps=True)
        except Exception as e:
            return {
                'status': 'ProofFail',
                'err_type': e.__class__.__name__,
                'err_str': str(e),
                'trace': traceback2.format_exc()
            }
        
        return {
            'status': 'ProofOK'
        }
    else:
        return {
            'status': 'NoSteps'
        }
Example #7
 async def on_ready(self):
     try:
         print(f"Logged in to {bot.user}")
         if self.user.id == 644065524879196193:
             await self.get_channel(info["ERROR_CHANNEL"]).send("Logged in")
         if platform.system() != "Windows":
             import uvloop, nest_asyncio
             if not discord.opus.is_loaded():
                 try:
                     discord.opus.load_opus("heroku-buildpack-libopus")
                 except:
                     pass
             nest_asyncio.apply()
             uvloop.install()
             loop = asyncio.get_event_loop()
             self.loop.add_signal_handler(
                 signal.SIGTERM,
                 lambda: loop.run_until_complete(self.on_sigterm()))
         database_channel = self.get_channel(736538898116902925)
         database_msg = await database_channel.fetch_message(
             database_channel.last_message_id)
         database_file = database_msg.attachments[0]
         db_byte = await database_file.read()
         db_dict = json.loads(db_byte)
         self.ADMIN = db_dict["role"]["ADMIN"]
         self.BAN = db_dict["role"]["BAN"]
         self.Contributor = db_dict["role"]["Contributor"]
         self.database = db_dict["user"]
         self.global_chat = db_dict["global_chat"]
         self.api_index = db_dict["system"]["api_index"]
         self.maintenance = db_dict["system"]["maintenance"]
         self.save_database.start()
     except:
         print(traceback2.format_exc())
Example #8
 async def global_chat(self, ctx):
     try:
         text = ctx.message.content.split(" ", 1)
         channel_id: int
         if len(text) == 1:
             channel_id = ctx.channel.id
         else:
             result = re.search(self.channel_match, text[1])
             if result is None:
                 return await ctx.send("チャンネルを検出できませんでした")
             else:
                 matched_channel_id = int(result["channel_id"])
                 channel = discord.utils.get(ctx.guild.text_channels,
                                             id=matched_channel_id)
                 if channel is None:
                     return await ctx.send("チャンネルを検出できませんでした")
                 channel_id = channel.id
         if channel_id in self.bot.global_chat["general"]:
             return await ctx.send("すでにグローバルチャットに登録されているチャンネルです")
         self.bot.global_chat["general"].append(channel_id)
         with open("./GLOBAL_CHAT.json", 'w') as db:
             json.dump(self.bot.global_chat, db, indent=2)
         await ctx.send(f"<#{channel_id}>チャンネルをグローバルチャットに追加しました.")
     except:
         await ctx.send(traceback2.format_exc())
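channel_match is not shown in this example; for a Discord channel mention of the form <#123456789>, it is presumably a compiled pattern along these lines (an assumption, not the project's actual definition), which matches the result["channel_id"] lookup above:

channel_match = re.compile(r"<#(?P<channel_id>\d+)>")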
Example #9
    async def _probe_site(self, site: HttpWebSite):

        self.logger.debug(
            "[%s] Running site probes...", site.name
        )

        scout = HttpScout(
            verify_ssl=site.verify_ssl,
            request_timeout=site.request_timeout,
            limit_sim_conn=self.get_config_param(
                None, 'limit_sim_conn', 5
            ),
            basic_usr=site.basic_user,
            basic_pwd=site.basic_pass
        )

        try:

            self.logger.debug(
                "[%s] Creating session...", site.name
            )
            await scout.create_session()
            futures = []

            # Create a list of futures, one per resource.
            for resource in site.get_resources():
                future = asyncio.ensure_future(
                    scout.probe_resource(site, resource)
                )
                futures.append(future)

            await asyncio.gather(
                *futures,
                return_exceptions=True
            )

            # Collect the probe results and send a message
            # to the Kafka brokers.
            probes = []
            for future in futures:
                probe_result = future.result()
                probes.append(probe_result.pack())

            msg = {
                'site': site.get_name(),
                'probes': probes,
            }

            await self._send2topic(msg, site.topic)

        except Exception as exc:  # pylint: disable=broad-except
            self.logger.error(
                '[%s] Unexpected error: %s\n%s', site.name, exc,
                '\n'.join(traceback2.format_exc().splitlines()) if self.debug
                else ''
            )
        finally:
            await scout.cleanup()
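Note that with return_exceptions=True the gathered exceptions are stored on the tasks, so future.result() in the loop above re-raises them and a single failed probe sends the whole run into the broad except handler, skipping the site's message. A small alternative sketch that inspects the gathered results directly; the names mirror the example and probe_result.pack() is assumed to behave as above:

async def gather_probe_results(futures, logger, site_name):
    # Hedged sketch: collect probe outcomes without re-raising; failed probes are
    # logged and skipped, successful ones are packed for the outgoing message.
    results = await asyncio.gather(*futures, return_exceptions=True)
    probes = []
    for result in results:
        if isinstance(result, Exception):
            logger.error('[%s] Probe failed: %s', site_name, result)
            continue
        probes.append(result.pack())
    return probes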
Example #10
 def __split(self, url, job):
     try:
         pool = Pool(max_gevents)
         lock = Lock()
         with open(job['path'], 'rb+' if exists(job['path']) else 'wb') as file:
             with requests.Session() as sess:
                 list_set = set(job['queue'])
                 left, right = (-1, -1)
                 while right < job['length'] - 1:
                     left, right = (right + 1,
                                    min(right + range_size,
                                        job['length'] - 1))
                     if (left, right) not in list_set:
                         pool.spawn(self.__download, url, job, left, right,
                                    sess, file, lock)
             pool.join()
         if len(job['queue']) < int(ceil(1.0 * job['length'] / range_size)):
             job['status'] = 'miss'
             controler.miss()
             logger.debug(url + ' miss')
         else:
             job['status'] = 'done'
             controler.success()
             logger.debug(url + ' done')
             print(url + ' done')
         controler.dict_list.add(url, job)
     except Exception as e:
         logger.error(format_exc())
Example #11
 async def get(self):
     try:
         data = json.loads(await self.request.read())
         cnt = await execute('delete from list where path=%s', (data, ))
         return web.json_response(data=cnt)
     except:
         logger.error(format_exc())
Example #12
    def parse(self, data):
        self.name = data['name']

        try:
            self.type = parser.parse_type(data['type'])
            self.cname = theory.thy.get_overload_const_name(
                self.name, self.type)

            for rule in data['rules']:
                with context.fresh_context(defs={self.name: self.type}):
                    prop = parser.parse_term(rule['prop'])

                # Test conclusion of the prop
                _, concl = prop.strip_implies()
                f, _ = concl.strip_comb()
                if f != Const(self.name, self.type):
                    raise ItemException(
                        "Inductive %s: wrong head of conclusion" % self.name)

                self.rules.append({'name': rule['name'], 'prop': prop})

        except Exception as error:
            self.type = data['type']
            self.rules = data['rules']
            self.error = error
            self.trace = traceback2.format_exc()
Example #13
    def parse(self, data):
        self.name = data['name']

        try:
            self.type = parser.parse_type(data['type'])
            self.cname = theory.thy.get_overload_const_name(
                self.name, self.type)

            for rule in data['rules']:
                with context.fresh_context(defs={self.name: self.type}):
                    prop = parser.parse_term(rule['prop'])

                # prop should be an equality
                if not prop.is_equals():
                    raise ItemException("Fun %s: rule is not an equality" %
                                        self.name)

                f, args = prop.lhs.strip_comb()
                if f != Const(self.name, self.type):
                    raise ItemException("Fun %s: wrong head of lhs" %
                                        self.name)
                lhs_vars = set(v.name for v in prop.lhs.get_vars())
                rhs_vars = set(v.name for v in prop.rhs.get_vars())
                if not rhs_vars.issubset(lhs_vars):
                    raise ItemException(
                        "Fun %s: extra variables in rhs: %s" %
                        (self.name, ", ".join(v for v in rhs_vars - lhs_vars)))

                self.rules.append({'prop': prop})

        except Exception as error:
            self.type = data['type']
            self.rules = data['rules']
            self.error = error
            self.trace = traceback2.format_exc()
Example #14
    def parse(self, data):
        self.name = data['name']

        try:
            with context.fresh_context(vars=data['vars']):
                self.vars = context.ctxt.vars
                self.prop = parser.parse_term(data['prop'])

            # theorem does not already exist
            if theory.thy.has_theorem(self.name):
                raise ItemException("Theorem %s: theorem already exists")

            # prop should not contain extra variables
            self_vars = set(self.vars.keys())
            prop_vars = set(v.name for v in self.prop.get_vars())
            if not prop_vars.issubset(self_vars):
                raise ItemException(
                    "Theorem %s: extra variables in prop: %s" %
                    (self.name, ", ".join(v for v in prop_vars - self_vars)))

        except Exception as error:
            self.vars = data['vars']
            self.prop = data['prop']
            self.error = error
            self.trace = traceback2.format_exc()

        if 'attributes' in data:
            self.attributes = data['attributes']
Example #15
    async def _parse_topic_messages(self, tp, messages):

        try:
            site_events_map = {}
            for message in messages:

                jmsg = json.loads(str(message.value.decode('ascii')))

                self.msgs_counter += 1
                self.logger.info(
                    "[%s/%d/%d] [%s] Received message %s", tp.topic,
                    tp.partition, message.offset,
                    str(uuid.UUID(bytes=message.key)),
                    str(message.value.decode('ascii'))
                    if self.debug else 'with %d events' % len(jmsg['probes']))

                # Organize messages for site
                if jmsg['site'] not in site_events_map:
                    site_events_map[jmsg['site']] = [jmsg]
                else:
                    site_events_map[jmsg['site']].append(jmsg)

            futures = []
            for site in site_events_map:
                events = site_events_map[site]
                futures.append(
                    asyncio.ensure_future(
                        self._process_site_events(site, tp.topic, events)))

            await asyncio.gather(*futures, return_exceptions=True)
        except Exception as exc:  # pylint: disable=broad-except
            self.logger.error(
                'Unexpected error while parsing topic messages: %s\n%s', exc,
                '\n'.join(traceback2.format_exc().splitlines())
                if self.debug else '')
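The site-grouping branch above can also be written with collections.defaultdict, which removes the explicit membership test. A minimal, equivalent sketch; parsed_messages stands in for the decoded jmsg values:

from collections import defaultdict

site_events_map = defaultdict(list)
for jmsg in parsed_messages:
    site_events_map[jmsg['site']].append(jmsg)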
Example #16
 async def get(self):
     try:
         session = await get_session(self.request)
         name = self.request.match_info.get('name')
         if not self.request.headers.get('Range'):
             headers = {'content-type': 'text/html; charset=utf-8'}
             return web.Response(status=200,
                                 headers=headers,
                                 body='hello world!')
         ll, rr = self.request.headers['Range'].replace('bytes=',
                                                        '').split('-')
         left = int(ll)
         right = int(rr)
         path = share_home + name
         if not exists(path):
             return web.Response(status=404)
         else:
             headers = {}
             headers[
                 'Content-Range'] = 'bytes ' + ll + '-' + rr + '/' + str(
                     getsize(path))
             with open(path, 'rb') as f:
                 f.seek(left)
                 body = f.read(right - left + 1)
             return web.Response(status=206, headers=headers, body=body)
     except:
         logger.error(format_exc())
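The handler above assumes a fully specified header such as Range: bytes=0-1023; an open-ended range like bytes=500- or a suffix range like bytes=-500 would make int() raise ValueError. A small parsing sketch that tolerates those forms; file_size is assumed to come from getsize(path):

def parse_byte_range(range_header, file_size):
    # Hedged sketch: handle "bytes=start-end", "bytes=start-" and "bytes=-suffix".
    spec = range_header.replace('bytes=', '').strip()
    ll, _, rr = spec.partition('-')
    if ll == '':  # "bytes=-500": the last 500 bytes
        left = max(file_size - int(rr), 0)
        right = file_size - 1
    else:
        left = int(ll)
        right = int(rr) if rr else file_size - 1
    return left, right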
Example #17
    def create_cache(self, data):
        self.username = data['username']
        self.theory_name = data['theory_name']
        self.thm_name = data['thm_name']
        self.vars = data['vars']
        self.prop = data['prop']
        self.steps = data['steps']

        if self.thm_name != '':
            limit = ('thm', self.thm_name)
        else:
            limit = None
        context.set_context(self.theory_name,
                            limit=limit,
                            username=self.username,
                            vars=self.vars)
        state = server.parse_init_state(self.prop)

        self.history = []
        self.states = [copy.copy(state)]
        self.error = None
        for step in self.steps:
            self.history.extend(state.parse_steps([step]))
            self.states.append(copy.copy(state))

        try:
            state.check_proof()
        except Exception as e:
            self.error = {
                'err_type': e.__class__.__name__,
                'err_str': str(e),
                'trace': traceback2.format_exc()
            }
Example #18
    def main(self, parse_cmdline_opts=True):
        ans = 0
        if parse_cmdline_opts:
            self.parse_cmd_line()

        try:
            self.init_thread_pool(size=self.get_config_param(
                'general',
                'thread_pool_size',
                20,
            ))
            self.add_signal_handler()
            self.loop.run_until_complete(
                asyncio.ensure_future(self._async_main()))
        except KeyboardInterrupt:
            pass
        except CancelledError:
            pass
        except Exception as exc:  # pylint: disable=broad-except
            self.logger.error(
                'Unexpected error: %s\n%s', exc,
                '\n'.join(traceback2.format_exc().splitlines())
                if self.debug else '')
            ans = 1

        # Running closing operation of the producer
        self.loop.run_until_complete(self._cleanup())

        self.logger.info("Instance %s stopped.", self.name)
        self.loop.close()

        return ans
Example #19
File: views.py Project: vardhman1996/qdv
def validate_taxonomy(request):
    logging.info('In validate taxonomy')
    logging.info(request)
    logging.info(request.POST.get('file_input'))
    out_data = request.POST.get('out_data')
    if out_data is None or len(out_data) == 0:
        return render(request, 'detection/display_tax_results.html',
                      {'error': 'Please specify the out dataset name'})
    handle_uploaded_file(request.FILES['file_input'])
    kwargs_mimic = dict()
    kwargs_mimic['type'] = 'taxonomy_to_tsv'
    kwargs_mimic['input'] = op.join(get_qd_root(), 'visualization/taxonomy/')
    kwargs_mimic['data'] = out_data
    kwargs_mimic['datas'] = [
        s.strip() for s in request.POST.get('str_datas').split(',')
    ]
    if len(kwargs_mimic['datas']) <= 0 or \
            len(kwargs_mimic['datas']) == 1 and kwargs_mimic['datas'][0] == '':
        return render(request, 'detection/display_tax_results.html',
                      {'error': 'Please specify at least one data source'})
    try:
        build_taxonomy_impl(kwargs_mimic['input'], **kwargs_mimic)
    except Exception as e:
        print(str(e))
        context = dict()
        context['files'] = []
        if str(e) == "":
            context['error'] = 'Taxonomy successfully verified'
            files = return_download_list(
                op.join(get_qd_root(), 'data/{}/'.format(out_data)))
            context['files'] = files
        else:
            trace = traceback.format_exc()
            context['error'] = trace
        return render(request, 'detection/display_tax_results.html', context)
Example #20
def fill_gaps(horizon=21*24*60*60):
	now = datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()
	logger.info(f'Fill gaps in trade buckets through {now}')

	max_timestamp_dt = get_max_timestamp_of_trade_buckets()
	if max_timestamp_dt is None:
		logger.info('Could not find max(timestamp_dt) for trade buckets. Default to now.')
		max_timestamp_dt = now

	twenty_one_days_ago = now - horizon
	start_time = min(twenty_one_days_ago, max_timestamp_dt) + 60

	while start_time <= now:
		# start_time_str should have format "2020-01-02 18:03", understood to be in UTC timezone.
		start_time_dt = datetime.fromtimestamp(start_time, tz=timezone.utc)
		start_time_str = start_time_dt.strftime("%Y-%m-%d %H:%M")

		# Query 2 hours of trades at a time. That's 120 samples per API query (hence, count=120).
		try:
			trades = bitmex_client.Trade.Trade_getBucketed(symbol="XBTUSD", binSize="1m", partial=False, reverse=False, count=120, filter=json.dumps({"startTime": start_time_str})).result()
			logger.info(f'start_time_str:{start_time_str} ntrades:{len(trades[0])}')
		except Exception:
			logger.error(traceback2.format_exc())
			trades = None

		# Archive historical trades, if any. Advance the clock 2 hours, but only if the last API request succeeded.
		if trades:
			for bucket in trades[0]: insert_trade_bucket(bucket)
			start_time = start_time + 2*60*60

		# Sleep to avoid API rate limit violations.
		time.sleep(2)
		# Update NOW in order to capture any trade buckets that may have completed since we started filling gaps.
		now = datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()
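get_max_timestamp_of_trade_buckets is not shown above; since its return value is mixed with epoch seconds via min(), it presumably returns a Unix timestamp. A hedged sketch of such a helper; the table and column names are assumptions, and the same connection/logger objects as elsewhere are reused:

def get_max_timestamp_of_trade_buckets():
    # Hedged sketch: return the newest persisted bucket time as a Unix timestamp (float), or None.
    try:
        with connection.cursor() as cursor:
            cursor.execute("SELECT UNIX_TIMESTAMP(MAX(`timestamp_dt`)) FROM `trade_buckets`")
            row = cursor.fetchone()
            return float(row[0]) if row and row[0] is not None else None
    except Exception:
        logger.error(traceback2.format_exc())
        return None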
Example #21
 def stop(self):
     try:
         self.__downloader_with_206.stop()
         self.__downloader_without_206.stop()
         controler.stop()
     except Exception as e:
         logger.error(format_exc())
Example #22
File: log_email.py Project: zzy361/ROBO
        def call_func(*args, **kwargs):

            try:
                result = func(*args, **kwargs)
            except Exception as e:
                sender = emailSender
                receivers = emailReceiver
                message = MIMEMultipart()
                message['From'] = Header(u"定时任务", 'utf-8')
                message['To'] = Header("", 'utf-8')
                subject = u"定时任务" + os.path.split(__file__)[1]
                message['Subject'] = Header(subject, 'utf-8')
                errorinfo = u'异常信息:\n' + str(traceback2.format_exc())
                message.attach(MIMEText(errorinfo, 'plain', 'utf-8'))
                for i in range(100):
                    try:
                        smtpObj = smtplib.SMTP_SSL(senderServer, 465)
                        smtpObj.login(senderUsername, senderPassword)
                        smtpObj.sendmail(sender, receivers,
                                         message.as_string())
                        logging.info(u"邮件发送成功")
                        break
                    except smtplib.SMTPException as e:
                        logging.error(e)
                logging.error(errorinfo)

                exit(0)

            return result
Example #23
 def start(self, home_i=home):
     try:
         controler.init(self.__queue.qsize())
         while not self.__queue.empty():
             self.__allocation(self.__queue.get(), home_i)
     except Exception as e:
         logger.error(format_exc())
Example #24
File: server.py Project: bzhan/holpy
    def parse_steps(self, steps):
        """Parse and apply a list of steps to self.

        Return the output from the list of steps.

        """
        history = []
        for step in steps:
            with global_setting(unicode=True, highlight=True):
                step_output = method.output_step(self, step)
            history.append({
                'step_output': step_output,
                'goal_id': step['goal_id'],
                'fact_ids': step.get('fact_ids', [])
            })
            try:
                method.apply_method(self, step)
                self.check_proof(compute_only=True)
            except Exception as e:
                history[-1]['error'] = {
                    'err_type': e.__class__.__name__,
                    'err_str': str(e),
                    'trace': traceback2.format_exc()
                }

        return history
Example #25
File: utils.py Project: xihadajiang/tfbtxj
    def _call( jyzd ):
        try:
            _jyzd = AttrDict( initd = jyzd , kword = None , nocopy = True )
            _tps.errLog( _tps.DEBUG , '交易字典内容输出:%s' % _jyzd , _tps.RPT_TO_LOG )
#            # Check whether this transaction's journal record has already been registered
#            if not _jyzd.LSBZ:
#                ins_lsz( lsh = _jyzd.SYS_XTLSH, jyrq = _jyzd.SYS_JYRQ, jysj = _jyzd.SYS_XTSJ, jym = _jyzd.SYS_JYBM, jgdm = _jyzd.JYJGM, gyh = _jyzd.CZGY )
#                _jyzd.LSBZ = '1'
            r = func( _jyzd )
            # Validate values in the transaction dict; make sure they are all converted to str.
            for k , v in jyzd.items():
                #v = jyzd[ k ]
                if not isinstance(v, str):
                    _tps.errLog( _tps.WARNING, '>>>>>>>>>>>>>>>>>>>[%s]类型错误[%s][%s]'%(k, type(v), repr(v)), _tps.RPT_TO_LOG )
                    if v is None: jyzd[ k ] = ''
                    else:         jyzd[ k ] = str( v )
            if r < 0:
                _tps.errLog( _tps.ERROR , '>>>>>>>>>>>>>>>>>>>函数返回:%s,%s' % ( _jyzd.SYS_RspCode , _jyzd.SYS_RspInfo ) , _tps.RPT_TO_LOG )
            return r
        except:
            msg = traceback2.format_exc( show_locals = True )
            _tps.errLog( _tps.ERROR ,  msg , _tps.RPT_TO_LOG )
            jyzd['SYS_RspCode'] = 'TS0004'
            jyzd['SYS_RspInfo'] = msg
            return -1
Example #26
 async def get(self):
     try:
         headers = {'content-type': 'text/html; charset=utf-8'}
         return web.Response(status=200,
                             headers=headers,
                             body='hello world!')
     except:
         logger.error(format_exc())
Example #27
 async def get(self):
     try:
         name = self.request.match_info.get('name')
         result = await search('select * from list where name like %s',
                               ('%' + name + '%', ))
         return web.json_response(data=result)
     except:
         logger.error(format_exc())
Example #28
 def __init__(self):
     try:
         self.__downloader_with_206 = downloader_with_206()
         self.__downloader_without_206 = downloader_whithout_206()
         self.__queue = Queue(max_works)
         self.host = ''
     except Exception as e:
         logger.error(format_exc())
Example #29
 async def on_global_message(self, message):
     try:
         if message.author.id in self.bot.BAN:
             return
         new_filter = Filter(self.filter, self.black_link_filter,
                             self.white_link_filter)
         return_code = await new_filter.execute_filter(message.content)
         if return_code == 1:
             return await message.add_reaction("🚫")
         else:
             await message.add_reaction("✅")
         self.bot.global_chat_log[message.id] = {
             "sender": str(message.author),
             "sender_id": message.author.id,
             "guild": message.guild.id,
             "channel": message.channel.id,
             "content": message.content,
             "attachments": [],
             "attachments_count": 0,
             "webhooks": []
         }
         files = []
         for attachment in message.attachments:
             attached_file = await attachment.to_file()
             files.append(attached_file)
             self.bot.global_chat_log[message.id]["attachments"].append(
                 attachment.proxy_url)
         self.bot.global_chat_log[message.id]["attachments_count"] = len(
             files)
         for global_channel_id in self.bot.global_chat["general"]:
             if global_channel_id == message.channel.id:
                 continue
             global_channel = self.bot.get_channel(global_channel_id)
             if global_channel is None:
                 self.bot.global_chat["general"].remove(global_channel_id)
                 with open("./GLOBAL_CHAT.json", 'w') as db:
                     json.dump(self.bot.global_chat, db, indent=2)
                 continue
             channel_webhooks = await global_channel.webhooks()
             webhook = discord.utils.get(channel_webhooks,
                                         name="muffin-webhook")
             if webhook is None:
                 webhook = await global_channel.create_webhook(
                     name="muffin-webhook")
             files = []
             for attachment in message.attachments:
                 attached_file = await attachment.to_file()
                 files.append(attached_file)
             msg_obj = await webhook.send(
                 message.content,
                 username=message.author.name,
                 avatar_url=message.author.avatar_url,
                 files=files,
                 wait=True)
             self.bot.global_chat_log[message.id]["webhooks"].append(
                 msg_obj)
     except:
         await message.channel.send(traceback2.format_exc())
Example #30
def insert_order(order_id, symbol, price, side, quantity, created_dt):
	logger.info(f'Inserting order: order_id:{order_id} symbol:{symbol} price:{price} side:{side} quantity:{quantity} created_dt:{created_dt}')
	try:
		with connection.cursor() as cursor:
			sql = "INSERT INTO `orders` (`order_id`, `symbol`, `price`, `side`, `quantity`, `created_dt`) VALUES (%s, %s, %s, %s, %s, FROM_UNIXTIME(%s))"
			cursor.execute(sql, (order_id, symbol, price, side, quantity, created_dt))
			connection.commit()
	except Exception:
		logger.error(traceback2.format_exc())
Example #31
    def check_traceback_format(self, cleanup_func=None):
        try:
            if issubclass(six.binary_type, six.string_types):
                # Python 2.6 or other platform where the interpreter 
                # is likely going to be spitting out bytes, which will
                # then fail with io.StringIO(), so we skip the cross-
                # checks with the C API there. Note that _testcapi
                # is included in (at least) Ubuntu CPython packages, which
                # makes the import check less effective than desired.
                raise ImportError
            from _testcapi import traceback_print
        except ImportError:
            traceback_print = None
        try:
            self.some_exception()
        except KeyError:
            type_, value, tb = sys.exc_info()
            if cleanup_func is not None:
                # Clear the inner frames, not this one
                cleanup_func(tb.tb_next)
            traceback_fmt = u('Traceback (most recent call last):\n') + \
                            u('').join(traceback.format_tb(tb))
            if traceback_print is not None:
                file_ = StringIO()
                traceback_print(tb, file_)
                python_fmt  = file_.getvalue()
            # Call all _tb and _exc functions
            with captured_output("stderr") as tbstderr:
                traceback.print_tb(tb)
            tbfile = StringIO()
            traceback.print_tb(tb, file=tbfile)
            with captured_output("stderr") as excstderr:
                traceback.print_exc()
            excfmt = traceback.format_exc()
            excfile = StringIO()
            traceback.print_exc(file=excfile)
        else:
            self.fail("unable to create test traceback string")

        # Make sure that Python and the traceback module format the same thing
        if traceback_print is not None:
            self.assertEqual(traceback_fmt, python_fmt)
        # Now verify the _tb func output
        self.assertEqual(tbstderr.getvalue(), tbfile.getvalue())
        # Now verify the _exc func output
        self.assertEqual(excstderr.getvalue(), excfile.getvalue())
        self.assertEqual(excfmt, excfile.getvalue())

        # Make sure that the traceback is properly indented.
        tb_lines = traceback_fmt.splitlines()
        self.assertEqual(len(tb_lines), 5)
        banner = tb_lines[0]
        location, source_line = tb_lines[-2:]
        self.assertTrue(banner.startswith('Traceback'))
        self.assertTrue(location.startswith('  File'))
        self.assertTrue(source_line.startswith('    raise'))
Example #32
def test_issue(encoding):
    expected = u"Всё очень плохо"
    try:
        foo()
    except Exception as e:
        text2 = traceback.format_exc()

    text3 = text(text2, encoding)
    print(u"EXCEPTION-TEXT: %s" % text3)
    print(u"text2: "+ text2)
    assert_that(text3, contains_string(u"AssertionError: Всё очень плохо"))
Example #33
File: log.py Project: chengdg/zjyw
def exception( logname , *args , **kwargs ):
    if logname:
        logger = init_log( logname )
        exc_msg = traceback2.format_exc( show_locals = True )
        args = list( args )
        if args:
            args[0] += '\n%s'
        else:
            args.append( '%s' )
        args.append( exc_msg )
        logger.error( _fmt_msg( *args , **kwargs ) )
        return ''
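A hedged usage sketch of the helper above; the log name and the failing function are illustrative only:

def do_work():
    raise ValueError('boom')  # illustrative failure

try:
    do_work()
except Exception:
    exception('applog', 'do_work failed')  # logs the message plus the formatted traceback with locals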
Example #34
    def initialise(self, config, name):
        """
        Setup and start sensors
        :param config: the config to use when making the instrument
        :param name: a name for the instrument
        :return:

        """
        try:
            _default_log_dir = '/var/log/corr'
            config_file_dict = parse_ini_file(config)
            log_file_dir = config_file_dict.get('FxCorrelator').get('log_file_dir', _default_log_dir)
            assert os.path.isdir(log_file_dir)
            
            start_time = str(time.time())
            ini_filename = os.path.basename(config)
            log_filename = '{}_{}_sensor_servlet.log'.format(ini_filename, start_time)

            self.instrument = fxcorrelator.FxCorrelator(
                                'dummy fx correlator for sensors', config_source=config,
                                mass_inform_func=self.mass_inform, getLogger=getKatcpLogger,
                                log_filename=log_filename, log_file_dir=log_file_dir)
            self.instrument.initialise(program=False, configure=False, require_epoch=False,
                                    mass_inform_func=self.mass_inform, getLogger=getKatcpLogger,
                                    log_filename=log_filename, log_file_dir=log_file_dir)
            
            #disable manually-issued sensor update informs (aka 'kcs' sensors):
            sensor_manager = sensors.SensorManager(self, self.instrument,kcs_sensors=False,
                                                    mass_inform_func=self.mass_inform,
                                                    log_filename=log_filename,
                                                    log_file_dir=log_file_dir)
            self.instrument.sensor_manager = sensor_manager
            sensors_periodic.setup_sensors(sensor_manager,enable_counters=False)

            # Function created to reassign all non-conforming log-handlers
            loggers_changed = reassign_log_handlers(mass_inform_func=self.mass_inform, 
                                                    log_filename=log_filename, 
                                                    log_file_dir=log_file_dir,
                                                    instrument_name=self.instrument.descriptor)
            
        except Exception as exc:
            stack_trace = traceback.format_exc()
            return self._log_stacktrace(stack_trace, 'Failed to initialise sensor_servlet.')
Example #35
def test_issue(encoding):
    """
    with encoding=UTF-8:
        File "/Users/jens/se/behave_main.unicode/tests/issues/test_issue0453.py", line 31, in problematic_step_impl
            raise Exception(u"по русски")
        Exception: \u043f\u043e \u0440\u0443\u0441\u0441\u043a\u0438

    with encoding=unicode_escape:
        File "/Users/jens/se/behave_main.unicode/tests/issues/test_issue0453.py", line 31, in problematic_step_impl
            raise Exception(u"по русски")
        Exception: по русски
    """
    context = None
    text2 = ""
    expected_text = u"по русски"
    try:
        problematic_step_impl(context)
    except Exception:
        text2 =  traceback.format_exc()

    text3 = text(text2, encoding)
    print(u"EXCEPTION-TEXT: %s" % text3)
    assert_that(text3, contains_string(u'raise Exception(u"по русски"'))
    assert_that(text3, contains_string(u"Exception: по русски"))