Example #1
    def on_created(self, event):
        print("Received File %s" % event.src_path)
        filename = event.src_path
        orders_action = {
            "customer_registered": "customer_registered",
            "product_ordered": "product_ordered",
            "order_declined": "order_declined",
            "order_accepted": "order_accepted",
            "order_cancelled": "order_cancelled",
            "order_fulfilled": "order_fulfilled"
        }
        reports_list = [
            "order_fulfilment_duration_report",
            "open_orders_report",
            "cancelled_orders_over_age_report"
        ]
        file_actions = FileActions(self.db)
        reports = Reports(self.db)

        with open(filename) as json_file:
            data = json.load(json_file)

            for d in data:
                file_actions.action(orders_action[d["type"]], d)
            # list_values = [v for v in row.values()]
            # self.db["events"].insert(list_values)
            os.remove(filename)

        for rl in reports_list:
            reports.create_report(rl)

        print("Waiting for File.")
Example #2
    def GET(self):
        web.header('Content-Type', 'text/html;charset=UTF-8')
        form = web.input(email="*****@*****.**",
                         adgroupId=None,
                         starttime=None,
                         endtime=None)

        logger.info("Received user " + form.email +
                    " request to get adgroup report")

        httpsSession = createHttpsSession(form.email)
        reportsPath = cf.get("apiservices", "reports")
        reporthandler = Reports(reportsPath, httpsSession, logger)
        if form.adgroupId is not None:
            result = reporthandler.queryAdgroupReportById(
                email=form.email,
                adgroupId=form.adgroupId,
                starttime=form.starttime,
                endtime=form.endtime)
        else:
            result = reporthandler.query_all_adgroups(email=form.email,
                                                      starttime=form.starttime,
                                                      endtime=form.endtime)

        return result
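The GET method above follows web.py's class-based handler convention. A minimal sketch of how such a class is usually mounted on a URL; the AdgroupReport class name and the /adgroup/report path are hypothetical, not taken from the example:

import web

class AdgroupReport:
    # hypothetical container for a GET method like the one above
    def GET(self):
        web.header('Content-Type', 'text/html;charset=UTF-8')
        return "adgroup report placeholder"

urls = ('/adgroup/report', 'AdgroupReport')
app = web.application(urls, globals())

if __name__ == '__main__':
    app.run()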
Example #3
async def main():
    logger = get_logger()
    logger.info("Start")
    auth = GAuth(logger)
    with open(f'{os.environ.get("SECRETS")}/creds.json', 'r') as f:
        creds = json.load(f)

    mail = GMail(auth.mail, logger)
    session = aiohttp.ClientSession()
    reports = Reports(session, mail, creds, logger)
    # Login
    login = asyncio.create_task(reports.login())

    # Get events to process
    calendar = GCal(auth.calendar, creds["cal_id"], logger)
    calendar.query_calendar()

    # Make sure we're logged in & process events
    await login
    if len(calendar.users) > 0:
        # Call reports.download_reports_and_send_draft for each new event; this downloads the report and creates a draft.
        # The coroutines are wrapped in tasks because asyncio.wait() no longer accepts bare coroutines on recent Python versions.
        done, pending = await asyncio.wait([
            asyncio.create_task(reports.download_reports_and_send_draft(data))
            for data in calendar.users
        ])

        # Patch events whose drafts were successfully created (the task returned a truthy result)
        for task in done:
            if result := task.result():
                calendar.patch(result)

    # Close the HTTP session so aiohttp does not warn about an unclosed client session
    await session.close()
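A likely entry point for the coroutine above (an assumption; the snippet does not show how main() is invoked):

if __name__ == "__main__":
    asyncio.run(main())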
Example #4
    def __init__(self, dbPath, dbName):
        self.database = dbPath + dbName
        self.dataPath = dbPath
        self.visits = Visits()
        self.guests = Guests()
        self.reports = Reports(self)
        self.teams = Teams()
        self.accounts = Accounts()
        self.devices = Devices()
        self.unlocks = Unlocks()
        # needs path since it will open read only
        self.customReports = CustomReports(self.database)
        self.certifications = Certifications()
        self.members = Members()
        self.logEvents = LogEvents()

        if not os.path.exists(self.database):
            if not os.path.exists(dbPath):
                os.mkdir(dbPath)
            with self.dbConnect() as c:
                self.migrate(c, 0)
        else:
            with self.dbConnect() as c:
                data = c.execute('PRAGMA schema_version').fetchone()
                if data[0] != SCHEMA_VERSION:
                    self.migrate(c, data[0])
Example #5
    def generate_reports(self):
        """ After all scan files are parsed, begin generating Excel Tabs """
        logging.info('Generating Reports')

        reports = Reports(main_app.main_window)

        total_reports = list(
            filter(lambda x: x.startswith('rpt'), dir(reports)))
        index = 0
        for report in total_reports:
            index += 1
            status = f"Generating Report {report}"
            print(status)
            logging.info(status)

            if main_app.main_window:
                main_app.main_window.progressBar.setValue(
                    int(100 * index / (len(total_reports)) * .9))
                QtGui.QGuiApplication.processEvents()
            getattr(reports, report)()

        reports.close_workbook()

        status = f"Report Generated"
        logging.info(status)
        print(status)
        if main_app.main_window:
            main_app.main_window.statusBar().showMessage(status)
            main_app.main_window.progressBar.setValue(0)
            QtGui.QGuiApplication.processEvents()
Example #6
    def generate_reports(self):
        """ After all scan files are parsed, begin generating Excel Tabs """

        reports = Reports(self.scan_results, self.data_mapping,
                          self.contact_info, self.poam_conf,
                          S2R.scans_to_reports)

        for report in filter(lambda x: x.startswith('rpt'), dir(reports)):
            print(report)
            getattr(reports, report)()
            QtWidgets.QApplication.processEvents()

        reports.close()
        if S2R.scans_to_reports:
            S2R.scans_to_reports.statusBar().showMessage("Report Generated")
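Examples #5 and #6 both discover report methods by reflection: dir() lists every attribute whose name starts with 'rpt', and getattr() calls each one. A self-contained sketch of that pattern with a hypothetical DemoReports class:

class DemoReports:
    def rpt_summary(self):
        print("summary tab")

    def rpt_details(self):
        print("details tab")

reports = DemoReports()
for name in filter(lambda x: x.startswith('rpt'), dir(reports)):
    # dir() returns names alphabetically, so rpt_details runs before rpt_summary
    getattr(reports, name)()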
Example #7
    def __init__(self, chat_id, bot):
        conn = sqlite3.connect(dataBaseDjangoDir)
        cursor = conn.cursor()
        cursor.execute("""select * from usuarios_usuario""")
        conn.commit()
        query = cursor.fetchall()
        for user in query:
            if user[4] == chat_id:
                self.name = user[1]
                self.email = user[4]
                self.authorize = True
                self.help = Help(chat_id)
                self.sensiTags = SensiTags(bot)
                self.graphic = Graphics()
                self.report = Reports()
        self.chat_id = chat_id
Example #8
    def re(self, event=""):
        self.withdraw()
        Reports(self, self.main_root)
Example #9
    def reports_btn(self):
        root2 = Toplevel(self.master)
        myGui = Reports(root2)
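Examples #8 and #9 both hand a Tk window to Reports and let it build its own widgets. A self-contained sketch of that pattern; the minimal Reports class here is a hypothetical stand-in, not the one used above:

from tkinter import Tk, Toplevel, Label

class Reports:
    # hypothetical stand-in: a real Reports window would build its own widgets here
    def __init__(self, master):
        Label(master, text="Reports").pack()

root = Tk()
Reports(Toplevel(root))
root.mainloop()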
Example #10
    async def _get_html(self, speed=1):
        """
        :param speed: interval between page flips, in seconds
        :return: yields the HTML content of each crawled page
        """
        sql = "select shop_id from shop_info where shop_id!='88888888'"  # 获取所有的店铺ID
        shop_infos = mysql.get_data(sql=sql, dict_result=True)
        shop_ids = []
        for shop_info in shop_infos:
            page_control = Format._read(shop_id=shop_info['shop_id'], flag="total_page")  # read the shop's total page count stored locally
            if not page_control:
                page_control = 1000  # default to 1000 if no total page count was found locally

            shop_ids.append(shop_info['shop_id'])  # keep the shop ID so its paging data can be reset later

            url = self.start_url.replace("shop", "shop" + shop_info["shop_id"])  # build the shop's homepage URL
            await self.page.goto(url)
            await self._jump_to_search_page()
            page_num = Format._read(shop_info['shop_id'], "page_num")  # read the locally stored page_num

            while page_num < page_control:
                start_time = time.time()  # record the start time for this page

                try:
                    # if page_num:
                    await self._goto_last_page_num(page_num + 1)
                    await asyncio.sleep(5)
                    frames = self.page.frames
                    for f in frames:
                        if await f.J("#TPL_username_1"):
                            yield 0, 0
                    frame = await self.login.get_nc_frame(frames=frames)
                    if frame:
                        await self.login.slider(self.page, 1)

                except Exception as e:
                    print(e)
                    await asyncio.sleep(5)
                    continue
                try:
                    await self.page.waitForSelector(".shop-hesper-bd.grid")
                except errors.TimeoutError:
                    break
                except Exception as e:
                    print(e)
                    continue
                Format._write(shop_id=shop_info['shop_id'], flag="page_num", value=page_num + 1)  # store the next page number to crawl locally
                page_num = Format._read(shop_info['shop_id'], "page_num")  # read the next page number to crawl

                yield await self.page.content(), shop_info['shop_id']  # yield the page HTML content and the shop ID

                page_control = Format._read(shop_id=shop_info['shop_id'], flag="total_page")  # re-read the locally stored total page count

                await asyncio.sleep(speed)  # wait between page flips
                spent_time_this_page = time.time() - start_time  # time spent on this page
                spent_time = Format._read(shop_id=shop_info['shop_id'], flag="spent_time")  # read the previously stored cumulative time
                Format._write(shop_id=shop_info['shop_id'], flag="spent_time",
                              value=spent_time + spent_time_this_page)  # add this page's time and store the total locally
            is_mail = Format._read(shop_info['shop_id'], "mail")
            if not is_mail:
                Reports().report(shop_info['shop_id'].split(" "))

        for shop_id in shop_ids:
            Format._del(shop_id=shop_id, flag="page_num")  # reset the paging position
            Format._del(shop_id=shop_id, flag="total_page")  # reset the total page count
            Format._del(shop_id=shop_id, flag="mail")  # reset the mail flag
            Format._del(shop_id=shop_id, flag="spent_time")  # reset the elapsed time
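The page and frame calls above (page.goto, frame.J, page.waitForSelector) match pyppeteer's API. Under that assumption, a minimal sketch of how a page object like self.page is typically created; the demo coroutine and URL are hypothetical:

import asyncio

from pyppeteer import launch

async def demo():
    # launch a headless Chromium instance and open one tab
    browser = await launch(headless=True)
    page = await browser.newPage()
    await page.goto("https://example.com")
    print(await page.title())
    await browser.close()

asyncio.run(demo())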
Example #11
import time

import schedule
from settings import config
from reports import Reports

# O
o_reports = Reports(config['o']['source'], config['o']['destination'], config['archive'])
# Z
z_reports = Reports(config['z']['source'], config['z']['destination'], config['archive'])
# R
r_reports = Reports(config['r']['source'], config['r']['destination'], config['archive'])

# Schedule
schedule.every(10).minutes.do(o_reports.move_all)
# --------------------------------------------------------
schedule.every().day.at('03:00').do(o_reports.create_week_archive)
schedule.every().day.at('03:10').do(z_reports.create_week_archive)
schedule.every().day.at('03:20').do(r_reports.create_week_archive)
# --------------------------------------------------------
schedule.every().day.at('05:00').do(r_reports.move_period)
# --------------------------------------------------------
schedule.every().day.at('06:00').do(z_reports.move_or_copy)
schedule.every().day.at('08:00').do(z_reports.move_or_copy)
schedule.every().day.at('12:00').do(z_reports.move_or_copy)
schedule.every().day.at('14:00').do(z_reports.move_or_copy)
schedule.every().day.at('16:00').do(z_reports.move_or_copy)

if __name__ == '__main__':
    while True:
        schedule.run_pending()
        time.sleep(1)  # avoid busy-waiting between schedule checks
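A minimal sketch of the shape settings.config is expected to have, inferred from the lookups above; the paths are hypothetical placeholders:

config = {
    'archive': '/srv/reports/archive',
    'o': {'source': '/srv/reports/o/in', 'destination': '/srv/reports/o/out'},
    'z': {'source': '/srv/reports/z/in', 'destination': '/srv/reports/z/out'},
    'r': {'source': '/srv/reports/r/in', 'destination': '/srv/reports/r/out'},
}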
Example #12
from flask import request, redirect, Flask, flash, render_template, send_from_directory, url_for, jsonify #pip install flask
from werkzeug.utils import secure_filename
import os,datetime,math
from customer import Customer 
from item import Item 
from payment import Payment
from transaction import Transaction
from goldprice import getTodaysGoldPrice
from reports import Reports
import util
# from util import encrypt
reports = Reports()
trans = Transaction()
payment = Payment()
item = Item()
app = Flask(__name__)
users = {'*****@*****.**': {'password': '******'}}
app.secret_key = 'super secret string'
customer = Customer()
UPLOAD_FOLDER = os.curdir+'\\uploads'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['SERVERNAME'] = "reddy:5000"

@app.route('/')
def home():
    return render_template("home.html",reports=get_reports())

@app.route('/uploads/<filename>')
def uploaded_file(filename):
    # body truncated in the source; a typical completion (an assumption) would be:
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
Example #13
    def _get_html(self):
        for shop_id in self._get_shop_id():
            start_time = time.time()
            curls = self._get_curls(shop_id)
            if not curls:
                continue
            curl = random.choice(curls)
            page_num, used_page_nums, total_page = self._get_page_num(shop_id)
            session = requests.Session()
            while page_num:
                url, params, cookies, headers = self.format_request_params(
                    curl['curl'], page_num)
                while 1:
                    try:
                        proxy = Format._read("1", "proxy")
                        print(proxy)
                        if not proxy:
                            self._set_proxy()
                        proxies = {"https": "https://{}".format(proxy)}
                        r = session.get(url=url,
                                        params=params,
                                        cookies=cookies,
                                        headers=headers,
                                        proxies=proxies,
                                        stream=True)
                    except requests.exceptions.ProxyError:
                        self._set_proxy()
                        session = requests.Session()
                        continue
                    except requests.exceptions.InvalidURL:
                        self._set_proxy()
                        continue
                    except requests.exceptions.SSLError:
                        self._set_proxy()
                        continue
                    else:
                        break
                html = r.text.replace("\\", "")
                html = re.sub(r'jsonp\d+\("|"\)', "", html)
                yield html, shop_id, curl, total_page, page_num
                spent_time = int(time.time() - start_time)
                used_page_nums.append(page_num)
                used_page_nums.sort()
                tspi = {  # tb_search_page_info
                    "used_page_nums":
                    ",".join([str(x) for x in used_page_nums]),
                    "spent_time": spent_time,
                    "last_date": datetime.date.today()
                }
                MySql.cls_update(db_setting=TEST_SERVER_DB_TEST,
                                 t="tb_search_page_info",
                                 set=tspi,
                                 c={"shop_id": shop_id})
                page_num, used_page_nums, total_page = self._get_page_num(
                    shop_id)
            sql = "UPDATE tb_master SET flag='XiaJia',update_date='{}' WHERE shop_id='{}' AND update_date<'{}'".format(
                datetime.date.today(), shop_id, datetime.date.today())
            # print(sql)
            MySql.cls_update(db_setting=TEST_SERVER_DB_TEST, sql=sql)
        reports = Reports()
        reports.report([ids for ids in self._get_shop_id()])
Example #14
from reports import Reports
import torch

# testing constants
MAX_EPISODE = 5000  # stop the training early and test the results

# don't move this; moving it creates circular dependencies.
report = Reports()

# flags
set_seed = True

# Program run constants
SEED = 0
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Device: {}'.format(DEVICE))
num_frames = 300000
VIDEO_INTERVAL = 100  # change to 1 to record all videos
NUM_FRAMES_STACKED = 4
XYZ_GOAL = [.2, .1, 1.2]  # More specific goal numbers [0.231, 0.105, 1.261]
SOLVED_DISTANCE = 1.0
WRITE_TO_FILE = True
REPORT_INTERVAL = 1  # write all the reports
SIZE = (512, 512)
LR = 1e-2  # Other LR values, was 1e-3 or 3e-4

# prioritized replay params
PRIORITY = False  # True runs with priority replay
ALPHA = .6  # alpha param for priority replay buffer
BETA = .4  # initial value of beta
BETA_ITERS = None  # number of iterations over which beta will be annealed from initial value
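A minimal sketch (an assumption, not shown in the snippet) of how the set_seed flag and SEED constant above are typically applied for reproducible runs:

import random

if set_seed:
    random.seed(SEED)
    torch.manual_seed(SEED)  # seeds the CPU RNG
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(SEED)  # seeds every GPU RNG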