def download(rqs, furl, path, level=3):
    if furl[0:4] != 'http':
        furl = "{}/{}".format(D2L_BASEURL, furl)

    file = rqs.get(furl, stream=True, headers={
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0"
    })

    if file.status_code == 302:  # D2L redirects 404/403 errors instead of returning them directly
        # (note: requests follows redirects by default, so this check only fires with allow_redirects=False)
        logger.error("Requested file is Not Found or Forbidden")

    if not os.path.isdir(safeFilePath(path)):
        # logger.info("Directory does not exist.")
        logger.debug(safeFilePath(path))
        mkdir_recursive(safeFilePath(path))

    try:
        name = furl.split('?')[0].split('/')[-1]

        if name == "DirectFileTopicDownload":
            name = file.headers['Content-Disposition'].split(';')[-1].split('=')[-1][1:-1]

        path += "/" + safeFilePath(name)
        with open(unquote(path), 'wb') as f:
            for chunk in tqdm.tqdm(file.iter_content(chunk_size=1024), desc="Downloading {}".format(name),
                                   position=level, unit="KiB"):  # each 1024-byte chunk is one KiB

                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    except Exception as e:
        logger.exception("Exception caught during file download. {}", str(e))
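A minimal call sketch for the function above, assuming D2L_BASEURL, safeFilePath, and mkdir_recursive are defined elsewhere in the module; the session, URL, and target directory here are illustrative only.

import requests

session = requests.Session()
# ... authenticate the session against D2L here (login flow not shown) ...
download(session,
         "/d2l/le/content/12345/topics/files/download/67890/DirectFileTopicDownload",
         "courses/example-course", level=0)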
Example #2
def mark_overlaps():
    session = get_session(db_name)
    cleanup_overlap_tables(session)
    global total_only_strain_1_to_n, total_strain_plus_length_1_to_n, output_record, total_only_strain_1_to_1, total_strain_plus_length_1_to_1

    try:
        count_source_seq = source_sequences(
            session=session,
            database_source=source_database_source,
            for_overlaps_with_target_source=target_name,
            count_only=True)
        for source_seq in tqdm(
                total=count_source_seq,
                iterable=source_sequences(
                    session=session,
                    database_source=source_database_source,
                    for_overlaps_with_target_source=target_name)):
            only_strain = []
            strain_plus_length = []

            target_seq_query = target_sequences(
                session=session,
                matching_strain=source_seq.strain_name,
                database_source=target_database_source)

            for target_seq in target_seq_query:
                if target_seq.length == source_seq.length:
                    strain_plus_length.append(target_seq)
                else:
                    only_strain.append(target_seq)

            if len(strain_plus_length) > 0:
                if len(strain_plus_length) > 1:
                    output_record.append('\t\tWARN\t\t')
                    total_strain_plus_length_1_to_n += 1
                else:
                    total_strain_plus_length_1_to_1 += 1
                acc_ids = [s.accession_id for s in strain_plus_length]
                output_record.append(
                    f'{source_seq.accession_id} matches with {target_name} strain+length on {acc_ids}'
                )
                insert_overlaps_in_db(session, session, source_seq,
                                      strain_plus_length, 'COG-UK',
                                      target_name)
            elif len(only_strain) > 0:
                if len(only_strain) > 1:
                    output_record.append('\t\tWARN\t\t')
                    total_only_strain_1_to_n += 1
                else:
                    total_only_strain_1_to_1 += 1
                acc_ids = [s.accession_id for s in only_strain]
                output_record.append(
                    f'{source_seq.accession_id} matches with {target_name} strain on {acc_ids}'
                )
                insert_overlaps_in_db(session, session, source_seq,
                                      only_strain, 'COG-UK', target_name)

        if user_asked_to_commit:
            session.commit()
    except KeyboardInterrupt:
        rollback(session)
        output_record.append(
            "COMPUTATION INTERRUPTED. TOTALS MAY BE INCOMPLETE !!")
    except Exception:
        logger.exception("")
        rollback(session)
        output_record.append(
            "COMPUTATION INTERRUPTED. TOTALS MAY BE INCOMPLETE !!")
    finally:
        session.close()

        totals_string = f'TOTALS:\n' \
                        f'1-1 MATCHES: strain+length: {total_strain_plus_length_1_to_1} -- only strain {total_only_strain_1_to_1}\n' \
                        f'1-N MATCHES: strain+length: {total_strain_plus_length_1_to_n} -- only strain {total_only_strain_1_to_n} (search "WARN" to find \'em)\n'
        logger.info(totals_string)

        output_path = f'.{sep}overlaps{sep}{source_name}_{target_name}{sep}'
        output_path += f'{source_name}@{db_name}_overlapping_{target_name}@{db_name}'
        output_path += f'_{date.today().strftime("%Y-%b-%d")}.txt'
        output_path = output_path.lower()
        with open(file=output_path, mode='w') as w:
            for line in output_record:
                w.write(line + "\n")
            w.write(totals_string)
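The count_only pass above exists so tqdm can show a real progress bar over a query that streams its results; a minimal sketch of that pattern, with a hypothetical generator standing in for source_sequences:

from tqdm import tqdm

def stream_rows(n):  # hypothetical stand-in for a streaming database query
    for i in range(n):
        yield i

total = 1000  # obtained up front with a cheap COUNT(*) query
for row in tqdm(iterable=stream_rows(total), total=total):
    pass  # process each row here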
Example #3
# -*- coding: utf-8 -*-
import Merge_data_set
import data
import class_cut
import os
from loguru import logger
logger.add("output.log", backtrace=True, diagnose=True)
try:
    filepath = r"C:/Users\JSKJ\Desktop\shenhe"  # path of the parent directory that holds the top-level folder
    files = os.listdir(filepath)
    for name in files:
        Merge_data_set.main(filepath, name)
        data.main(filepath, name)
        class_cut.main(filepath, name)
    print("完成")
except:
    logger.exception('error')
Example #4
    def mongo_to_mysql(self):
        # Copy the data from MongoDB to MySQL
        dbu = DBUtil()
        dbu.create_session_factory(
            con_str=
            'mysql+pymysql://root:[email protected]:3306/ncov?charset=utf8mb4&connect_timeout=30'
        )
        dbs = DBSession(dbu.get_new_session())

        n = 0
        self.client = MongoClient("mongodb://{}:{}/".format(
            self.host, self.port))
        self.database = self.client["ncov"]

        while True:
            self.collection = self.database["DXYArea"]
            query = {}
            cursor = self.collection.find(query)
            sql = 'insert into dxyarea (_id,comment,confirmedCount,country,createTime,curedCount,deadCount,modifyTime,operator,provinceName,provinceShortName,suspectedCount,updateTime) values(:_id,:comment,:confirmedCount,:country,:createTime,:curedCount,:deadCount,:modifyTime,:operator,:provinceName,:provinceShortName,:suspectedCount,:updateTime)'
            for doc in cursor:
                doc['_id'] = str(doc['_id'])
                doc['updateTime'] = str(doc['updateTime'])
                try:
                    doc['modifyTime'] = str(doc['modifyTime'])
                except KeyError:
                    doc['modifyTime'] = None

                try:
                    doc['createTime'] = str(doc['createTime'])
                except KeyError:
                    doc['createTime'] = None
                try:
                    doc['operator'] = str(doc['operator'])
                except KeyError:
                    doc['operator'] = None

                try:
                    if doc['cities'] != '':
                        if isinstance(doc['cities'], list) is False:
                            doc['cities'] = json.loads(doc['cities'])

                        for r in doc['cities']:
                            sql2 = 'insert into dxyarea_city (dxyarea_id,cityName,deadCount,curedCount,suspectedCount,confirmedCount) values(:dxyarea_id,:cityName,:deadCount,:curedCount,:suspectedCount,:confirmedCount)'
                            r['dxyarea_id'] = doc['_id']
                            try:
                                dbs.exec_sql(sql2, r)

                            except Exception as e:
                                emsg = repr(e)
                                if 'Duplicate entry' in emsg:
                                    pass
                                else:
                                    logger.exception('Encountered a problem')
                        dbs.commit()
                    # else:
                    #     print(doc['cities'])
                except Exception:
                    logger.exception('')
                    # print(doc['cities'])
                try:
                    dbs.exec_sql(sql, doc)

                except Exception as e:
                    emsg = (repr(e))
                    if 'Duplicate entry' in emsg:
                        pass
                    else:
                        logger.exception('Encountered a problem')
            dbs.commit()

            self.collection = self.database["DXYOverall"]
            query = {}
            cursor = self.collection.find(query)
            sql = 'insert into dxyoverall (_id,abroadRemark,confirmedCount,countRemark,curedCount,dailyPic,deadCount,generalRemark,infectSource,passWay,remark1,remark2,remark3,remark4,remark5,summary,suspectedCount,updateTime,virus) values(:_id,:abroadRemark,:confirmedCount,:countRemark,:curedCount,:dailyPic,:deadCount,:generalRemark,:infectSource,:passWay,:remark1,:remark2,:remark3,:remark4,:remark5,:summary,:suspectedCount,:updateTime,:virus)'
            for doc in cursor:
                doc['_id'] = str(doc['_id'])
                doc['updateTime'] = str(doc['updateTime'])
                try:
                    doc['abroadRemark'] = str(doc['abroadRemark'])
                except KeyError:
                    doc['abroadRemark'] = None
                try:
                    doc['generalRemark'] = str(doc['generalRemark'])
                except KeyError:
                    doc['generalRemark'] = None
                try:
                    dbs.exec_sql(sql, doc)

                except Exception as e:
                    emsg = (repr(e))
                    if 'Duplicate entry' in emsg:
                        pass
                    else:
                        logger.exception('Encountered a problem')
            dbs.commit()

            logger.debug('Data conversion finished successfully')
            sleep(30)
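The loops above tolerate duplicate rows by string-matching 'Duplicate entry' in the exception text; on MySQL the same effect can be achieved up front with INSERT IGNORE. A hedged sketch, with dbs and doc as in the method above and the column list abbreviated:

# Let MySQL silently skip rows that violate a unique key,
# instead of inserting and then parsing the error message.
sql = ('insert ignore into dxyarea (_id, confirmedCount) '
       'values (:_id, :confirmedCount)')
dbs.exec_sql(sql, {'_id': doc['_id'], 'confirmedCount': doc['confirmedCount']})
dbs.commit()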
Example #5
import sys

from loguru import logger  # assumed import; matches the other examples


def e():
    f()


def f():
    g()


def g():
    h()


def h():
    i()


def i():
    j(1, 0)


def j(a, b):
    a / b


sys.tracebacklimit = 5

try:
    e()  # entry point of the call chain defined above
except ZeroDivisionError:
    logger.exception("")
Example #6
def b_explicit():
    try:
        c_not_decorated()
    except ZeroDivisionError:
        logger.exception("")
Example #7
 def stop(self):
     try:
         self.remote.stop()
     except Exception:
         LOG.exception("Failed to shut down {} cleanly".format(self.name))
Example #8
def run(args_, tables_format_rules=None):
    parser = argparse.ArgumentParser(
        description="Read CCF ledger or snapshot",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("paths",
                        help="Path to ledger directories or snapshot file",
                        nargs="+")
    parser.add_argument(
        "-s",
        "--snapshot",
        help="Indicates that the path to read is a snapshot",
        action="store_true",
    )
    parser.add_argument(
        "-t",
        "--tables",
        help="Regex filter for tables to display",
        type=str,
        default=".*",
    )
    parser.add_argument("--uncommitted",
                        help="Also parse uncommitted ledger files",
                        action="store_true")
    args = parser.parse_args(args_)

    table_filter = re.compile(args.tables)

    # Extend and compile rules
    tables_format_rules = tables_format_rules or []
    tables_format_rules.extend(default_tables_format_rules)
    tables_format_rules = [(re.compile(table_name_re), _)
                           for (table_name_re, _) in tables_format_rules]

    if args.snapshot:
        snapshot_file = args.paths[0]
        with ccf.ledger.Snapshot(snapshot_file) as snapshot:
            LOG.info(
                f"Reading snapshot from {snapshot_file} ({'' if snapshot.is_committed() else 'un'}committed)"
            )
            dump_entry(snapshot, table_filter, tables_format_rules)
    else:
        ledger_dirs = args.paths
        ledger = ccf.ledger.Ledger(ledger_dirs,
                                   committed_only=not args.uncommitted)

        LOG.info(f"Reading ledger from {ledger_dirs}")
        LOG.info(f"Contains {counted_string(ledger, 'chunk')}")

        try:
            for chunk in ledger:
                LOG.info(
                    f"chunk {chunk.filename()} ({'' if chunk.is_committed() else 'un'}committed)"
                )
                for transaction in chunk:
                    dump_entry(transaction, table_filter, tables_format_rules)
        except Exception as e:
            LOG.exception(f"Error parsing ledger: {e}")
            has_error = True
        else:
            LOG.success("Ledger verification complete")
            has_error = False
        finally:
            LOG.info(
                f"Found {ledger.signature_count()} signatures, and verified until {ledger.last_verified_txid()}"
            )
        return not has_error
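A hedged invocation sketch for run(); the ledger path and table regex are illustrative only:

# Read a ledger directory, displaying only public tables.
ok = run(["/var/ccf/ledger", "--tables", "public:.*", "--uncommitted"])
print("verified" if ok else "errors found")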
Example #9
def run_embeddings_experiment(n_epochs=1, blocks=[2]):
    """
    Runs classification experiments to evaluate the architectures defined below
    :return:
    """

    config = parse_opts()
    config.use_embeddings = True
    config.use_quadruplet = True
    if config.cuda_available:
        torch.cuda.set_device(config.cuda_id0)
        print(config.cuda_available)
        print(config.cuda_id0)
        print(torch.cuda.device_count())
        print(torch.cuda.is_available())
        print(torch.cuda.current_device())

    architectures = [ModelArch('resnet', '3d', 18, 'pretrained_models/resnet-18-kinetics-ucf101_split1.pth', 'A', 16,
                               2, 101),
                     ModelArch('resnet', '3d', 18, 'pretrained_models/resnet-18-kinetics-ucf101_split1.pth', 'A', 16,
                               1, 101),
                     ModelArch('resnet', '3d', 34, 'pretrained_models/resnet-34-kinetics.pth', 'A', 16, 2, 400),
                     ModelArch('resnet', '3d', 34, 'pretrained_models/resnet-34-kinetics.pth', 'A', 16, 1, 400),
                     ModelArch('resnet', '3d', 50, 'pretrained_models/resnet-50-kinetics.pth', 'B', 16, 2, 400),
                     ModelArch('resnet', '3d', 50, 'pretrained_models/resnet-50-kinetics.pth', 'B', 16, 1, 400),
                     ModelArch('resnet', '3d', 101, 'pretrained_models/resnet-101-kinetics.pth', 'B', 16, 2, 400),
                     ModelArch('resnet', '3d', 101, 'pretrained_models/resnet-101-kinetics.pth', 'B', 16, 1, 400),

                     ModelArch('resnet', 'ir_csn', 18, 'pretrained_models/resnet-18-kinetics-ucf101_split1.pth', 'A',
                               16, 1, 101),
                     ModelArch('resnet', 'ir_csn', 34, 'pretrained_models/resnet-34-kinetics.pth', 'A', 16, 2, 400),
                     ModelArch('resnet', 'ir_csn', 34, 'pretrained_models/resnet-34-kinetics.pth', 'A', 16, 1, 400),
                     ModelArch('resnet', 'ir_csn', 50, 'pretrained_models/resnet-50-kinetics.pth', 'B', 16, 2, 400),
                     ModelArch('resnet', 'ir_csn', 50, 'pretrained_models/resnet-50-kinetics.pth', 'B', 16, 1, 400),

                     ModelArch('resnext', '3d', 101, 'pretrained_models/resnext-101-64f-kinetics-hmdb51_split1.pth',
                               'B', 64, 2, 51),
                     ModelArch('resnext', '3d', 101, 'pretrained_models/resnext-101-64f-kinetics-hmdb51_split1.pth',
                               'B', 64, 1, 51)
                     ]

    for architecture in architectures:

        config.base_model = architecture.arch
        config.model_type = architecture.type
        config.model_depth = architecture.depth
        if architecture.depth < 50:
            config.batch_size = 80
        if config.cuda_available:
            assert os.path.exists(architecture.path)
            config.pretrain_path = architecture.path
        else:
            config.pretrain_path = ''
        config.resnet_shortcut = architecture.shortcut
        config.sample_duration = architecture.n_frames
        config.finetune_block = architecture.fn_block
        config.learning_rate = 0.0004

        result_path = os.path.join('embeddings_results', f'{architecture.arch}-{architecture.depth}',
                                   f'{architecture.type}_fn_block_{architecture.fn_block}')
        os.makedirs(result_path, exist_ok=True)
        config.result_path = result_path
        try:
            train(config)
        except Exception as e:
            log.exception(f'Couldn\'t run experiment for model: {architecture}. Error: {e}')
Example #10
def run(args):

    s.validate_tests_signature(s.tests)

    if args.enforce_reqs is False:
        LOG.warning("Test requirements will be ignored")

    hosts = ["localhost", "localhost"]
    txs = app.LoggingTxs()
    network = infra.ccf.Network(hosts,
                                args.debug_nodes,
                                args.perf_nodes,
                                txs=txs)
    network.start_and_join(args)

    LOG.info(f"Running {len(s.tests)} tests for {args.test_duration} seconds")

    run_tests = {}
    success = True
    elapsed = args.test_duration

    for i, test in enumerate(s.tests):
        status = None
        reason = None

        if elapsed <= 0:
            LOG.warning(
                f"Test duration time ({args.test_duration} seconds) is up!")
            break

        try:
            LOG.debug(f"Running {s.test_name(test)}...")
            test_time_before = time.time()

            # Actually run the test
            new_network = test(network, args)
            status = TestStatus.success

        except reqs.TestRequirementsNotMet as ce:
            LOG.warning(f"Test requirements for {s.test_name(test)} not met")
            status = TestStatus.skipped
            reason = str(ce)
            new_network = network

        except Exception as e:
            LOG.exception(f"Test {s.test_name(test)} failed")
            status = TestStatus.failure
            new_network = network

        test_elapsed = time.time() - test_time_before

        # Construct test report
        run_tests[i] = {
            "name": s.test_name(test),
            "status": status.name,
            "elapsed (s)": round(test_elapsed, 2),
        }

        if reason is not None:
            run_tests[i]["reason"] = reason

        # If the test function did not return a network, it is not possible to continue
        if new_network is None:
            raise ValueError(
                f"Network returned by {s.test_name(test)} is None")

        # If the network was changed (e.g. recovery test), stop the previous network
        # and use the new network from now on
        if new_network != network:
            network.stop_all_nodes()
            network = new_network

        LOG.debug(f"Test {s.test_name(test)} took {test_elapsed:.2f} secs")

        # For now, if a test fails, the entire test suite is stopped
        if status is TestStatus.failure:
            success = False
            break

        elapsed -= test_elapsed

    network.stop_all_nodes()

    LOG.success(f"Ran {len(run_tests)}/{len(s.tests)} tests:")
    LOG.success(f"\n{json.dumps(run_tests, indent=4)}")

    if not success:
        sys.exit(1)
Example #11
    async def scrim_loop(self):
        await self.wait_until_ready()
        channel_1 = self.get_channel(770408087613997076)

        channel_2 = self.get_channel(637681714923044883)

        def star_phrases(stars):
            if stars == 3:
                # return ["Well that was just the bee's knees! 3 stars for you!",
                #         "You pulled a blinder there! Nice 3 star attack!",
                #         "That was a bloody good, three star attack!",
                #         "You must be chuffed! 3 more stars for your clan.",
                #         "That was a doodle!  Too easy!",
                #         "You got the full Monty! Well done!",
                #         "On it like a car bonnet!"]
                return [
                    "Holy smokes!  You ripped out three stars on that one!",
                    "I have no words. You are the reason I can wake up and face each day!",
                    "Tearing down the walls and taking names with three stars.",
                    "All hail the conquering heroes! Here are three stars for your clan!",
                    "Nothing less than perfection!", "I want to be you!"
                ]
            if stars == 2:
                # return ["Let's take a butchers and see if someone else can get all three stars.",
                #         "You made a dog's dinner of that one.  Keep practicing.",
                #         "That was almost mint. Guess someone else will have to nick that last star."]
                return [
                    "Almost there! You will get them next time.",
                    "At least we know where all the traps are now!",
                    "So close, yet so far.",
                    ("With a little coaching, I believe you will wreck that base next time. "
                     "Oh wait, there is no next time.")
                ]
            if stars == 1:
                # return ["Nothing to see here.  Just your bog-standard one star.",
                #         "Well that was a botch job. Wonky attack!",
                #         "Budge up and make some room for someone who will get three stars!",
                #         "Give me a tinkle on the blower and we'll chat about what went wrong there.",
                #         "That is minging.  You can do better than that!",
                #         "Is it me or was that attack a bit skew-whiff?",
                #         "Well that really throws a spanner in the works. We're going to have to do better than that."]
                return [
                    "Perhaps another strategy next time.",
                    "We are going to need some clean up on aisle 1.",
                    "May I recommend a good tutorial or perhaps a YouTube channel?",
                    "I'm doing a little quick math here and I'm not sure that's going to add to the win column."
                    "You are going to need more stars than that if you want to win this war!"
                ]
            if stars == 0:
                # return ["Kill the CC. Kill the heroes. Bob's your uncle. Wreck the base.  Better luck next time.",
                #         "You dropped a clanger there. Ask for help next time!",
                #         "Well, that went a bit pear-shaped.",
                #         "That just takes the biscuit!"]
                return [
                    "Do you even clash, bro?", "That was a scout, right?",
                    "Uh oh, I think we had a disco!",
                    "Uhhhhh, I'm just going to look the other way."
                    "It is OK.  Take a deep breath.  We didn't really need those stars anyway."
                ]

        for war_clan in war_clans:
            war = await coc_client.get_current_war(f"#{war_clan}")
            if war_clan == clan_1:
                fname = "scrim1.txt"
                channel = channel_1
                clan_1_name = clan_name_1
                clan_2_name = clan_name_2
                print(fname)
            else:
                fname = "scrim2.txt"
                channel = channel_2
                clan_1_name = "Team Eddy"
                clan_2_name = "Team HODL"
            with open(fname, "r") as f:
                last_attack = int(float(f.readline()))
            new_last_attack = last_attack
            # if war.state == "preparation" and self.flag == 0:
            #     hours = war.start_time.seconds_until // 3600
            #     minutes = (war.start_time.seconds_until % 3600) // 60
            #     content = f"{emoji_1} **RCS vs. Roar's** {emoji_2}"
            #     content += (f"\n{hours:.0f} hours and {minutes:.0f} minutes until the war begins.\n"
            #                 f"Come back and watch the RCS take away their roar!")
            #     await channel.send(content)
            #     self.flag = 1
            #     logger.info("Flag switched to 1. Prep message should not show again.")
            if war.state in ['inWar', 'warEnded']:
                hours = war.end_time.seconds_until // 3600
                minutes = (war.end_time.seconds_until % 3600) // 60
                print(f"{hours:02}:{minutes:02} left in war")
                try:
                    for attack in war.attacks:
                        print("Processing war attacks...")
                        print(
                            f"{attack.order}. {attack.attacker.town_hall} vs {attack.defender.town_hall}"
                        )
                        if attack.order > last_attack:
                            print(f"Processing attack #{attack.order}")
                            attacker_name = f"{str(attack.attacker.map_position)}. {attack.attacker.name}"
                            defender_name = f"{str(attack.defender.map_position)}. {attack.defender.name}"
                            if attack.defender.is_opponent:
                                attacker_name = f"{emoji_1} {attacker_name}"
                                defender_name = f"{emoji_2} {defender_name}"
                            else:
                                attacker_name = f"{emoji_2} {attacker_name}"
                                defender_name = f"{emoji_1} {defender_name}"
                            townhalls = f"({str(attack.attacker.town_hall)}v{str(attack.defender.town_hall)})"
                            line_1 = f"{attacker_name} just attacked {defender_name}"
                            stars = f"{emojis['stars']['new']*attack.stars}{emojis['stars']['empty']*(3-attack.stars)}"
                            line_2 = f"{stars} ({str(attack.destruction)}%) {townhalls}"
                            if attack.defender.is_opponent:
                                line_3 = f"{random.choice(star_phrases(attack.stars))}"
                            else:
                                # Should be "" if opponent is not RCS
                                line_3 = f"{random.choice(star_phrases(attack.stars))}"
                            content = f"{line_1}\n{line_2}\n{line_3}\n------------"
                            await channel.send(content)
                            logger.info(
                                f"Attack #{attack.order} processed and posted."
                            )
                            new_last_attack = attack.order
                            print(new_last_attack)
                except:
                    logger.exception("attack loop")
                if new_last_attack > last_attack:
                    if len(clan_1_name) > len(clan_2_name):
                        name_width = len(clan_1_name) + 3
                    else:
                        name_width = len(clan_2_name) + 3
                    zws = " \u200b"
                    clan_1_name = f"`{zws*(name_width-len(clan_1_name)-1)}{clan_1_name}{zws}`"
                    clan_2_name = f"`\u200b {clan_2_name}{zws*(name_width-len(clan_2_name)-2)}`"
                    clan_1_stars = f"{war.clan.stars}/{war.clan.max_stars}"
                    clan_1_stars = f"`{zws*(name_width-len(clan_1_stars)-1)}{clan_1_stars}{zws}`"
                    clan_2_stars = f"{war.opponent.stars}/{war.opponent.max_stars}"
                    clan_2_stars = f"`\u200b {clan_2_stars}{zws*(name_width-len(clan_2_stars)-2)}`"
                    if war.clan.destruction < 100:
                        width = 5
                        precision = 4
                        clan_1_per = f"{war.clan.destruction:{width}.{precision}}"
                    else:
                        clan_1_per = str(war.clan.destruction)
                    clan_1_per = f"`{zws*(name_width-len(clan_1_per)-2)}{clan_1_per}%{zws}`"
                    if war.opponent.destruction < 100:
                        width = 4
                        precision = 4
                        clan_2_per = f"{war.opponent.destruction:{width}.{precision}}"
                    else:
                        clan_2_per = str(war.opponent.destruction)
                    clan_2_per = f"`\u200b {clan_2_per}%{zws*(name_width-len(clan_2_per)-3)}`"
                    clan_1_attacks = f"{war.clan.attacks_used}/{war.team_size*2}"
                    clan_1_attacks = f"`{zws*(name_width-len(clan_1_attacks)-1)}{clan_1_attacks}{zws}`"
                    clan_2_attacks = f"{war.opponent.attacks_used}/{war.team_size*2}"
                    clan_2_attacks = f"`\u200b {clan_2_attacks}{zws*(name_width-len(clan_2_attacks)-2)}`"
                    content = f"{clan_1_name}{emojis['other']['gap']}{emojis['other']['rcs']}{emojis['other']['gap']}{clan_2_name}"
                    content += f"\n{clan_1_stars}{emojis['other']['gap']}{emojis['stars']['new']}{emojis['other']['gap']}{clan_2_stars}"
                    content += f"\n{clan_1_per}{emojis['other']['gap']}{emojis['other']['per']}{emojis['other']['gap']}{clan_2_per}"
                    content += f"\n{clan_1_attacks}{emojis['other']['gap']}{emojis['other']['swords']}{emojis['other']['gap']}{clan_2_attacks}"
                    await channel.send(content)
                    if war.end_time.seconds_until > 3600:
                        await channel.send(
                            f"{war.end_time.seconds_until // 3600:.0f} hours left in war."
                        )
                    else:
                        await channel.send(
                            f"{war.end_time.seconds_until // 60:.0f} minutes left in war."
                        )
                    try:
                        with open(fname, 'w') as f:
                            f.write(str(new_last_attack))
                    except:
                        logger.exception("Failed to write file")
            if datetime.now().hour == 4 and 0 < datetime.now().minute < 12:
                logger.debug(f"End of Loop | Flag = {self.flag}")
Example #12
File: main.py Project: etzhang416/sc2_bot
async def _play_replay(client, ai, realtime=False, player_id=0):
    ai._initialize_variables()

    game_data = await client.get_game_data()
    game_info = await client.get_game_info()
    client.game_step = 1
    # This game_data will become self._game_data in botAI
    ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)
    state = await client.observation()
    # Check game result every time we get the observation
    if client._game_result:
        await ai.on_end(client._game_result[player_id])
        return client._game_result[player_id]
    gs = GameState(state.observation)
    proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())
    ai._prepare_step(gs, proto_game_info)
    ai._prepare_first_step()
    try:
        await ai.on_start()
    except Exception:
        logger.exception("AI on_start threw an error")
        logger.error("resigning due to previous error")
        await ai.on_end(Result.Defeat)
        return Result.Defeat

    iteration = 0
    while True:
        if iteration != 0:
            if realtime:
                # TODO: check what happens if a bot takes too long to respond, so that the requested
                #  game_loop might already be in the past
                state = await client.observation(gs.game_loop + client.game_step)
            else:
                state = await client.observation()
            # check game result every time we get the observation
            if client._game_result:
                try:
                    await ai.on_end(client._game_result[player_id])
                except TypeError as error:
                    # print(f"caught type error {error}")
                    # print(f"return {client._game_result[player_id]}")
                    return client._game_result[player_id]
                return client._game_result[player_id]
            gs = GameState(state.observation)
            logger.debug(f"Score: {gs.score.score}")

            proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())
            ai._prepare_step(gs, proto_game_info)

        logger.debug(f"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s")

        try:
            # Issue event like unit created or unit destroyed
            await ai.issue_events()
            await ai.on_step(iteration)
            await ai._after_step()

        except Exception as e:
            if isinstance(e, ProtocolError) and e.is_game_over_error:
                if realtime:
                    return None
                # result = client._game_result[player_id]
                # if result is None:
                #     logger.error("Game over, but no results gathered")
                #     raise
                await ai.on_end(Result.Victory)
                return None
            # NOTE: this message is caught by pytest suite
            logger.exception(f"AI step threw an error")  # DO NOT EDIT!
            logger.error(f"Error: {e}")
            logger.error(f"Resigning due to previous error")
            try:
                await ai.on_end(Result.Defeat)
            except TypeError as error:
                # print(f"caught type error {error}")
                # print(f"return {Result.Defeat}")
                return Result.Defeat
            return Result.Defeat

        logger.debug(f"Running AI step: done")

        if not realtime:
            if not client.in_game:  # Client left (resigned) the game
                await ai.on_end(Result.Victory)
                return Result.Victory

        await client.step()  # unindent one line to work in realtime

        iteration += 1
Example #13
File: main.py Project: etzhang416/sc2_bot
async def _play_game_ai(client, player_id, ai, realtime, step_time_limit, game_time_limit):
    if realtime:
        assert step_time_limit is None

    # step_time_limit works like this:
    # * If None, then step time is not limited
    # * If given integer or float, the bot will simply resign if any step takes longer than that
    # * Otherwise step_time_limit must be an object, with following settings:
    #
    # Key         | Value      | Description
    # ------------|------------|-------------
    # penalty     | None       | No penalty, the bot can continue on next step
    # penalty     | N: int     | Cooldown penalty, BotAI.on_step will not be called for N steps
    # penalty     | "resign"   | Bot resigns when going over time limit
    # time_limit  | int/float  | Time limit for a single step
    # window_size | N: int     | The time limit will be used for the last N steps, instead of 1
    #
    # Cooldown is a harsh penalty. The bot loses the ability to act, but even worse,
    # the observation data from skipped steps is also lost. It's like falling asleep in
    # the middle of the game.
    time_penalty_cooldown = 0
    if step_time_limit is None:
        time_limit = None
        time_window = None
        time_penalty = None
    elif isinstance(step_time_limit, (int, float)):
        time_limit = float(step_time_limit)
        time_window = SlidingTimeWindow(1)
        time_penalty = "resign"
    else:
        assert isinstance(step_time_limit, dict)
        time_penalty = step_time_limit.get("penalty", None)
        time_window = SlidingTimeWindow(int(step_time_limit.get("window_size", 1)))
        time_limit = step_time_limit.get("time_limit", None)
        if time_limit is not None:
            time_limit = float(time_limit)

    ai._initialize_variables()

    game_data = await client.get_game_data()
    game_info = await client.get_game_info()

    # This game_data will become self._game_data in botAI
    ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)
    state = await client.observation()
    # check game result every time we get the observation
    if client._game_result:
        await ai.on_end(client._game_result[player_id])
        return client._game_result[player_id]
    gs = GameState(state.observation)
    proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())
    try:
        ai._prepare_step(gs, proto_game_info)
        await ai.on_before_start()
        ai._prepare_first_step()
        await ai.on_start()
    except Exception:
        logger.exception("AI on_start threw an error")
        logger.error("resigning due to previous error")
        await ai.on_end(Result.Defeat)
        return Result.Defeat

    iteration = 0
    while True:
        if iteration != 0:
            if realtime:
                # On realtime=True, might get an error here: sc2.protocol.ProtocolError: ['Not in a game']
                try:
                    requested_step = gs.game_loop + client.game_step
                    state = await client.observation(requested_step)
                    # If the bot took too long in the previous observation, request another observation one frame after
                    if state.observation.observation.game_loop > requested_step:
                        # TODO Remove these 2 comments
                        # t = state.observation.observation.game_loop
                        state = await client.observation(state.observation.observation.game_loop + 1)
                        # print(f"Requested step: {requested_step}, received: {t}, new: {state.observation.observation.game_loop}")
                except ProtocolError:
                    pass
            else:
                state = await client.observation()
            # check game result every time we get the observation
            if client._game_result:
                try:
                    await ai.on_end(client._game_result[player_id])
                except TypeError as error:
                    # print(f"caught type error {error}")
                    # print(f"return {client._game_result[player_id]}")
                    return client._game_result[player_id]
                return client._game_result[player_id]
            gs = GameState(state.observation)
            logger.debug(f"Score: {gs.score.score}")

            if game_time_limit and (gs.game_loop * 0.725 * (1 / 16)) > game_time_limit:
                await ai.on_end(Result.Tie)
                return Result.Tie
            proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())
            ai._prepare_step(gs, proto_game_info)

        logger.debug(f"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s")

        try:
            if realtime:
                # Issue event like unit created or unit destroyed
                await ai.issue_events()
                await ai.on_step(iteration)
                await ai._after_step()
            else:
                if time_penalty_cooldown > 0:
                    time_penalty_cooldown -= 1
                    logger.warning(f"Running AI step: penalty cooldown: {time_penalty_cooldown}")
                    iteration -= 1  # Do not increment the iteration on this round
                elif time_limit is None:
                    # Issue event like unit created or unit destroyed
                    await ai.issue_events()
                    await ai.on_step(iteration)
                    await ai._after_step()
                else:
                    out_of_budget = False
                    budget = time_limit - time_window.available

                    # Tell the bot how much time it has left attribute
                    ai.time_budget_available = budget

                    if budget < 0:
                        logger.warning(f"Running AI step: out of budget before step")
                        step_time = 0.0
                        out_of_budget = True
                    else:
                        step_start = time.monotonic()
                        try:
                            async with async_timeout.timeout(budget):
                                await ai.issue_events()
                                await ai.on_step(iteration)
                        except asyncio.TimeoutError:
                            step_time = time.monotonic() - step_start
                            logger.warning(
                                f"Running AI step: out of budget; "
                                + f"budget={budget:.2f}, steptime={step_time:.2f}, "
                                + f"window={time_window.available_fmt}"
                            )
                            out_of_budget = True
                        step_time = time.monotonic() - step_start

                    time_window.push(step_time)

                    if out_of_budget and time_penalty is not None:
                        if time_penalty == "resign":
                            raise RuntimeError("Out of time")
                        else:
                            time_penalty_cooldown = int(time_penalty)
                            time_window.clear()

                    await ai._after_step()
        except Exception as e:
            if isinstance(e, ProtocolError) and e.is_game_over_error:
                if realtime:
                    return None
                result = client._game_result[player_id]
                if result is None:
                    logger.error("Game over, but no results gathered")
                    raise
                await ai.on_end(result)
                return result
            # NOTE: this message is caught by pytest suite
            logger.exception(f"AI step threw an error")  # DO NOT EDIT!
            logger.error(f"Error: {e}")
            logger.error(f"Resigning due to previous error")
            try:
                await ai.on_end(Result.Defeat)
            except TypeError as error:
                # print(f"caught type error {error}")
                # print(f"return {Result.Defeat}")
                return Result.Defeat
            return Result.Defeat

        logger.debug(f"Running AI step: done")

        if not realtime:
            if not client.in_game:  # Client left (resigned) the game
                await ai.on_end(client._game_result[player_id])
                return client._game_result[player_id]

            await client.step()

        iteration += 1
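The step_time_limit table documented at the top of _play_game_ai admits three shapes; a short sketch of the accepted values (the client and ai objects are elided):

no_limit   = None                        # step time not limited
hard_limit = 2.0                         # resign if any single step exceeds 2s
windowed   = {"time_limit": 4.0,         # budget measured over a window...
              "window_size": 10,         # ...of the last 10 steps
              "penalty": 5}              # skip 5 steps when over budget
# e.g. await _play_game_ai(client, 1, ai, realtime=False,
#                          step_time_limit=windowed, game_time_limit=None)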
Example #14
    url = f"https://www1.ncdc.noaa.gov/pub/has/model/{order_id}/"

    df = pd.read_html(url, skiprows=2)[0]
    cols = df.columns[[1, 2, 3]]
    df = df[cols]
    df.columns = ["filename", "date", "size"]

    for fn in df["filename"].tolist():
        result = run(f"gsutil -q stat gs://carecur-gfs-data/grid3/{fn}",
                     hide=True,
                     warn=True)

        if result.exited:  # if we failed to find the file, create it
            ftp = f"ftp://ftp.ncdc.noaa.gov/pub/has/model/{order_id}/{fn}"
            cmd = f"curl {ftp} | gsutil cp - gs://carecur-gfs-data/grid3/{fn}"
            logger.info("Running {cmd}", cmd=cmd)
            run(cmd)
        else:
            logger.info("{fn} already exists.  Skipping.", fn=fn)


if __name__ == "__main__":
    for order_id in order_ids:
        try:
            load_orders(order_id)
        except Exception as e:
            logger.exception(
                " Skipping {order_id}. Encountered an unexpected error: {e}",
                order_id=order_id,
                e=e)
Example #15
 def func_wrapper():
     set_interval(func, sec)
     try:
         func()
     except Exception:  # pylint: disable=broad-except
         logger.exception("Exception during execution, rescheduling sync")
Example #16
                if not psutil.pid_exists(int(pyarkpid)):
                    if count == 1:
                        log.error(
                            f"Pyark process is not running. No process at pid [{pyarkpid}]"
                        )
                    count += 1
                else:
                    log.trace("pyark process passed pid check")

            try:
                lockhandle = open(str(pyarklockfile), "w")
                fcntl.lockf(lockhandle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                log.trace("pyark process passed file lock check")
            else:
                fcntl.flock(lockhandle, fcntl.LOCK_UN)
                lockhandle.close()
                if count == 1:
                    log.error(
                        f"Pyark process [{pyarkpid}] is not running. (Lockfile not locked)"
                    )
    except Exception:
        if count == 1:
            log.exception("Error in arkwatchdog main loop!!")
            count += 1

    if count == 6:
        count = 1

    sleep(60)
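The watchdog fragment above only probes the lock to see whether pyark still holds it; a hedged sketch of the acquiring side (the lockfile path is hypothetical):

import fcntl

lockhandle = open("/tmp/pyark.lock", "w")
try:
    # Hold an exclusive, non-blocking lock for the life of the process.
    fcntl.lockf(lockhandle, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    raise SystemExit("another pyark instance is already running")
# ... run normally; the lock is released when the process exits ...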
Example #17
    def run( self ):
        r""" Miner main loop.
        """
        # ---- Build Bittensor neuron ----
        with self:
            if self.config.neuron.use_wandb:
                bittensor.wandb(
                    config = self.config,
                    cold_pubkey = self.wallet.coldkeypub.ss58_address,
                    hot_pubkey = self.wallet.hotkey.ss58_address,
                    root_dir = self.config.neuron.full_path
                )

            # ---- Init run state ----
            self.epoch = 0   

            # ---- reloads previous run if not restart ----
            if self.config.neuron.no_restart:
                self.save()

            try:
                self.reload()
                self.axon.check()
            except Exception as e:
                logger.error("Error when trying to reload model: {}".format(e))
                self.save()
                self.reload()
                self.axon.check()
            
            self.stats.ema_scores = torch.nn.Parameter(torch.ones(self.metagraph.n.item()).to(self.device) * (1 / self.metagraph.n.item()), requires_grad = False)

            # --- Run until n_epochs ----
            while self.epoch < self.config.neuron.n_epochs:
                try:
                    # --- Init epoch stat----
                    self.stats.epoch_data_size = 0
                    self.stats.epoch_sync_count = 0
                    total_local_target_epoch_loss = 0
                    total_distillation_epoch_loss = 0
                    total_remote_target_epoch_loss = 0
                    total_local_epoch_acc = 0
                    batches_count = 0

                    # ---- Run epoch ----
                    start_block = self.subtensor.get_current_block() + 1
                    end_block = start_block + self.config.neuron.epoch_length
                    block_steps = [ block_delta for block_delta in range(start_block, end_block)]
                    progress_bar = qqdm( block_steps, total=len(block_steps), desc=format_str('blue', f'Epoch:'))
                    progress_bar.set_bar = partial(progress_bar.set_bar,  element='#')
                    for block in progress_bar:

                        # --- Iterate over batches until the end of the block.
                        current_block = self.subtensor.get_current_block()
                        while block >= current_block:
                            # ---- Forward pass ----
                            inputs = next( self.dataset )
                            output = self.nucleus.remote_forward (
                                inputs = inputs.to( self.device ),
                                training = True,
                            )
                            
                            # ---- Backward pass ----
                            output.loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
                            scores = torch.nn.functional.normalize ( torch.relu( self.nucleus.compute_scores(output.remote_target_loss) ), p=1, dim = 0 )
                            scores[output.query_uids] += 1e-6

                            output.loss.backward() # Accumulates gradients on the nucleus.
                            clip_grad_norm_(self.nucleus.parameters(), self.config.neuron.clip_gradients)
                            
                            # ---- Apply and zero accumulated gradients.
                            self.optimizer.step() 
                            self.optimizer.zero_grad()
                            current_block = self.subtensor.get_current_block()
                            
                            # ---- Aggregate outputs and losses
                            total_local_target_epoch_loss += output.local_target_loss.item()
                            total_distillation_epoch_loss += output.distillation_loss.item()
                            total_remote_target_epoch_loss += output.remote_target_loss.item()
                            total_local_epoch_acc += output.local_accuracy
                            self.stats.epoch_data_size += inputs.nelement()
                            batches_count += 1
                            
                            # ---- Expand ema_scores tensor if the chain grew and aggregate the score
                            chain_growth = max(scores.shape[0] - self.stats.ema_scores.shape[0], 0)
                            if chain_growth > 0:
                                self.stats.ema_scores = torch.nn.Parameter(torch.cat( [self.stats.ema_scores, torch.zeros([chain_growth], dtype=torch.float32, device = self.device)]), requires_grad=False)
                            self.stats.ema_scores = self.fisher_ema_decay * self.stats.ema_scores + (1 - self.fisher_ema_decay) * scores
                            self.stats.scores = scores


                        # ---- Sync with metagraph if the current block >= last synced block + sync block time 
                        current_block = self.subtensor.get_current_block()
                        block_diff = current_block - self.stats.last_sync_block
                        if block_diff >= self.config.neuron.sync_block_time:
                            self.sync(current_block)                                                                                                                
                            self.stats.last_sync_block = current_block
                            self.stats.epoch_sync_count += 1
                            
                        # ---- Update the epoch loss if it is the last iteration within epoch
                        if block+1 == end_block :
                            self.stats.local_target_epoch_loss = total_local_target_epoch_loss / batches_count
                            self.stats.distillation_epoch_loss = total_distillation_epoch_loss / batches_count
                            self.stats.remote_target_epoch_loss = total_remote_target_epoch_loss / batches_count
                            self.stats.local_epoch_acc = total_local_epoch_acc / batches_count

                        # ---- Block logs.
                        self.logs (
                            progress_bar,
                            iteration = block-start_block,
                            output = output,
                        )
                        self.stats.global_step += 1

                    # ---- Update params ----
                    self.epoch += 1

                    # ---- Checkpoint state ----
                    self.checkpoint()

                except KeyboardInterrupt:
                    # --- User ended session ----
                    break

                except Exception as e:
                    # --- Unknown error ----
                    logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())
                    if self.config.neuron.restart_on_failure:
                        logger.info('Restarting from last saved state.')
                        self.reload()
                    else:
                        break
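The ema_scores update above pads the tensor when the chain grows, then applies an exponential moving average; a minimal numeric sketch of the same update (the decay value is illustrative):

import torch

decay = 0.995                                # illustrative fisher_ema_decay
ema = torch.ones(3) / 3                      # previous scores for 3 peers
scores = torch.tensor([0.5, 0.3, 0.2, 0.0])  # the chain grew to 4 peers
chain_growth = max(scores.shape[0] - ema.shape[0], 0)
if chain_growth > 0:
    ema = torch.cat([ema, torch.zeros(chain_growth)])
ema = decay * ema + (1 - decay) * scores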
Example #18
def process_gcn(payload, root):
    "The callback function when a GCN is received."
    DEBUG_TEST = config.get_config_for_key("DEBUG_TEST") or False

    # Get relevant info from VOEvent
    try:
        info = getinfo(root)
    except:
        logger.exception("Error getting info from VOEvent payload.")
        info = {}

    if not DEBUG_TEST:
        if info.get("role") == "test":
            logger.debug("Received Mock VOEvent.")
            return

    # Back up VOE file
    try:
        backup_voe(payload, info)
    except:
        logger.exception("Error backing up VOE file.")

    # Retrieve skymap and generate targets if necessary
    targets = None
    graphbytes = None
    if info.get("skymap_fits") is not None:
        try:
            skymap_hdulist = retrieve_skymap(info)
            try:
                backup_skymap(skymap_hdulist, info)
            except:
                logger.exception("Problem backing-up skymap")
            try:
                targets = scheduler.generate_targets(skymap_hdulist)
                try:
                    graphbytes = scheduler.graphtargets(
                        info, targets, skymap_hdulist)
                except:
                    logger.exception("Error graphing targets")
            except:
                logger.exception("Error generating targets")
            try:
                scheduler.get_distance(skymap_hdulist, info)
            except:
                logger.exception("Error getting distance")
        except:
            logger.exception(
                "Error downloading FITS skymap for Grace ID: {} from URL: {}".
                format(info.get("graceid"), info.get("skymap_fits")))

    # Send Alert by email
    try:
        sendalertemail(payload, info, targets_graph=graphbytes)
    except:
        logger.exception("Error sending alert email.")

    # Send Alert to Slack
    try:
        sendslack(info)
        logger.info("Alert message sent to Slack.")
    except:
        logger.exception("Error sending Slack message.")

    # Upload targets to broker site
    try:
        upload_gcnnotice(info, targets)
    except:
        logger.exception("Error uploading targets to broker.")
Example #19
 def get_request(self, route: str) -> requests.Response:
     url = f'http://{self.ip}/{route}'
     response = requests.request('GET', url, auth=(self.cgi_user, self.cgi_password))
     if response.status_code == 401:
         try:
             response.raise_for_status()
         except requests.HTTPError:
             logger.exception("Unauthorized request to {}", url)
     return response
Example #20
 def wait(self):
     try:
         self.remote.wait_for_stdout_line(line="Global commit", timeout=5)
     except Exception:
         LOG.exception("Failed to wait on client {}".format(self.name))
Example #21
                                        'Progress : {} \n'.format(
                                            str(share_course['courseName']), str(share_course['secret']),
                                            str(share_course['teacherName']),
                                            str(share_course['schoolName']),
                                            str(share_course['lessonName']), str(share_course['progress'])
                                        )
                                    self.bot.send_group_message(target_group, text)
                                break
            sleep(interval)


if __name__ == '__main__':
    zbot = ZhiHuiShuBot(client)
    error = 10
    while True:
        try:
            zbot.run()
        except KeyboardInterrupt:
            exit(0)
        except Exception as e:
            if isinstance(e, MiraiStatusError) and '错误代码[5]' in e.args[0]:
                logger.debug('Mirai error code [5]')
            else:
                logger.exception(e)
                zbot.bot.send_group_message(target_group, f'ZhiHuiShu bot error: {str(e)}')
                sleep(2)
                error -= 1
                if error < 0:
                    zbot.bot.send_group_message(target_group, f'Too many errors, server down, {str(e)}')
                    exit(0)
Example #22
def sliding_window(a, ws, ss=None, flatten=True):
    """
    based on: https://stackoverflow.com/questions/22685274

    Return a sliding window over a in any number of dimensions

    Parameters
    ----------
    a : ndarray
        an n-dimensional numpy array
    ws : int, tuple
        an int (a is 1D) or tuple (a is 2D or greater) representing the size of
        each dimension of the window
    ss : int, tuple
        an int (a is 1D) or tuple (a is 2D or greater) representing the amount
        to slide the window in each dimension. If not specified, it defaults to ws.
    flatten : bool
        if True, all slices are flattened, otherwise, there is an extra dimension
        for each dimension of the input.

    Returns
    -------
        strided : ndarray
            an array containing each n-dimensional window from a
    """

    if ss is None:
        # ss was not provided; the windows will not overlap in any direction.
        ss = ws
    ws = norm_shape(ws)
    ss = norm_shape(ss)

    # convert ws, ss, and a.shape to numpy arrays so that we can do math in every
    # dimension at once.
    ws = np.array(ws)
    ss = np.array(ss)
    shape = np.array(a.shape)

    # ensure that ws, ss, and a.shape all have the same number of dimensions
    ls = [len(shape), len(ws), len(ss)]
    if 1 != len(set(ls)):
        logger.exception(
            ValueError(
                f"a.shape, ws and ss must all have the same length. They were {ls}"
            ))

    # ensure that ws is smaller than a in every dimension
    if np.any(ws > shape):
        logger.exception(
            ValueError(
                f"ws cannot be larger than a in any dimension. "
                f"a.shape was {a.shape} and ws was {tuple(ws)}"))

    # how many slices will there be in each dimension?
    newshape = norm_shape(((shape - ws) // ss) + 1)
    # the shape of the strided array will be the number of slices in each dimension
    # plus the shape of the window (tuple addition)
    newshape += norm_shape(ws)
    # the strides tuple will be the array's strides multiplied by step size, plus
    # the array's strides (tuple addition)
    newstrides = norm_shape(np.array(a.strides) * ss) + a.strides
    strided = np.lib.stride_tricks.as_strided(a,
                                              shape=newshape,
                                              strides=newstrides)
    if not flatten:
        return strided

    # Collapse strided so that it has one more dimension than the window.  I.e.,
    # the new array is a flat list of slices.
    meat = len(ws) if ws.shape else 0
    firstdim = (np.prod(newshape[:-meat]), ) if ws.shape else ()
    dim = firstdim + (newshape[-meat:])
    dim = list(filter(lambda i: i != 1, dim))

    return strided.reshape(dim)
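
A quick check of the behaviour the docstring describes, runnable together with the function above. norm_shape is referenced but not shown here; the version below follows the helper from the linked Stack Overflow answer (an assumption made so the sketch is self-contained):

import numpy as np

def norm_shape(shape):
    """Normalize an int or a sequence of ints to a tuple of ints."""
    try:
        return (int(shape), )
    except TypeError:
        return tuple(int(i) for i in shape)

a = np.arange(10)
print(sliding_window(a, ws=4, ss=2))
# four overlapping windows of length 4: [0..3], [2..5], [4..7], [6..9]

b = np.arange(16).reshape(4, 4)
print(sliding_window(b, ws=(2, 2), ss=(2, 2)).shape)
# (4, 2, 2): four non-overlapping 2x2 tiles
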
Example #23
def run_continuous(scanner: DataPipeline, capture: SpecializedCapture, auto_push: bool):
    " run in continuous mode twice an hour "

    # check for new source code (return if found so watchdog can reload the main loop)
    if util_git.monitor_check(): return

    host = get_host()
    try:
        print("starting continuous run")

        # run the first time outside of the 'retry' logic
        # so it fails if something is really wrong

        # get new external source data
        scanner.update_sources()

        # main scan/clean/extract loop 
        scanner.process()

        # run a one-off capture if requested
        if capture: do_specialized_capture(capture)

        # push to the git repo
        if auto_push: util_git.push(scanner.config.base_dir, f"{udatetime.to_logformat(scanner.change_list.start_date)} on {host}")

        # check for new source again
        if util_git.monitor_check(): return

        cnt = 1
        retry_cnt = 0
        t = next_time()

        # run twice per hour forever
        #    on error, retry twice before going back to sleep until next cycle
        print(f"sleep until {t}")
        while True:
            time.sleep(15)
            if datetime.now() < t: continue

            if util_git.monitor_check(): break

            print("==================================")
            print(f"=== run {cnt} at {t}")
            print("==================================")

            try:
                scanner.update_sources()
                scanner.process()
                if capture: do_specialized_capture(capture)
                if auto_push: util_git.push(scanner.config.base_dir, f"{udatetime.to_displayformat(scanner.change_list.start_date)} on {host}")
                retry_cnt = 0
            except Exception as ex:
                logger.exception(ex)

                # retry at most twice, five minutes apart; the counter is reset
                # only after a successful run, otherwise wait for the next cycle
                if retry_cnt < 2:
                    print("run failed, wait 5 minutes and try again")
                    t = t + timedelta(minutes=5)
                    retry_cnt += 1
                else:
                    t = next_time()
                    retry_cnt = 0
                continue

            print("==================================")
            print("")
            t = next_time()
            print(f"sleep until {t}")                        
            cnt += 1
    finally:
        if capture: capture.close()
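
next_time is used above but not shown in this snippet. Given the "twice per hour" comments, a plausible stand-in rounds up to the next half-hour boundary — an assumption about the helper, not the project's actual code:

from datetime import datetime, timedelta

def next_time() -> datetime:
    """Hypothetical: return the next :00 or :30 boundary after now."""
    now = datetime.now()
    base = now.replace(minute=0, second=0, microsecond=0)
    return base + timedelta(minutes=30 if now.minute < 30 else 60)
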
Example #24
File: network.py Project: rneatherway/CCF
    def _start_all_nodes(
        self,
        args,
        recovery=False,
        ledger_dir=None,
        read_only_ledger_dir=None,
        snapshot_dir=None,
    ):
        hosts = self.hosts

        if not args.package:
            raise ValueError("A package name must be specified.")

        self.status = ServiceStatus.OPENING
        LOG.info("Opening CCF service on {}".format(hosts))

        forwarded_args = {
            arg: getattr(args, arg)
            for arg in infra.network.Network.node_args_to_forward
        }

        for i, node in enumerate(self.nodes):
            try:
                if i == 0:
                    if not recovery:
                        node.start(
                            lib_name=args.package,
                            workspace=args.workspace,
                            label=args.label,
                            common_dir=self.common_dir,
                            members_info=self.consortium.get_members_info(),
                            **forwarded_args,
                        )
                    else:
                        node.recover(
                            lib_name=args.package,
                            workspace=args.workspace,
                            label=args.label,
                            common_dir=self.common_dir,
                            ledger_dir=ledger_dir,
                            read_only_ledger_dir=read_only_ledger_dir,
                            snapshot_dir=snapshot_dir,
                            **forwarded_args,
                        )
                        # When a recovery network is started without an existing network,
                        # it is not possible to know the local node IDs before the first
                        # node is started and has recovered the ledger. The local node IDs
                        # are adjusted accordingly then.
                        if self.existing_network is None:
                            self.wait_for_state(
                                node,
                                "partOfPublicNetwork",
                                timeout=args.ledger_recovery_timeout,
                            )
                            self._adjust_local_node_ids(node)
                else:
                    # When a new service is started, initial nodes join without a snapshot
                    self._add_node(
                        node,
                        args.package,
                        args,
                        recovery=recovery,
                        ledger_dir=ledger_dir,
                        from_snapshot=snapshot_dir is not None,
                        read_only_ledger_dir=read_only_ledger_dir,
                        snapshot_dir=snapshot_dir,
                    )
            except Exception:
                LOG.exception("Failed to start node {}".format(node.node_id))
                raise

        self.election_duration = (args.bft_view_change_timeout /
                                  1000 if args.consensus == "bft" else
                                  args.raft_election_timeout / 1000) * 2

        LOG.info("All nodes started")

        # Here, recovery nodes might still be catching up, and possibly swamp
        # the current primary which would not be able to serve user requests
        primary, _ = self.find_primary(
            timeout=args.ledger_recovery_timeout if recovery else 3)
        return primary
Example #25
def c(n):
    # 1 / n raises ZeroDivisionError once the recursion reaches n == 0; the
    # caller one frame up (n == 1) catches it and logs the full traceback.
    1 / n
    try:
        c(n - 1)
    except ZeroDivisionError:
        logger.exception("")
Example #26
def loop(matrix, pe):
    i = 0
    # canvas and loop setup
    canvas = matrix.CreateFrameCanvas(writeppm)
    x_min = 0
    y_min = 0
    x_max = canvas.width - 1 - (rightbar and (rightbarwidth + spacetr))
    y_max = canvas.height - 1

    linenum_min = x_min
    linenum_max = linenum_min + linenum_width - 1
    limit = (y_max - y_min + 1 - text_startr - fonttext.height + fonttext.baseline + lineheight) // lineheight
    x_pixels = x_max - x_min + 1

    currenttime = localtime()
    # x_max here actually has to be recalculated every time
    scrollx_stop_xmax = x_max-((not rightbar) and header_spacest+textpx(fonttext, clockstr_tt(currenttime)))
    stop_scroller = SimpleScrollline(x_min, scrollx_stop_xmax, symtextoffset, fonttext, lighttextColor, noscroll=not headerscroll)

    # tmp
    deptime_x_max = x_max

    deps: List[Departure] = []
    meldungs: List[Meldung] = []

    scrollx_msg_xmax = (canvas.width - 1) if scrollmsg_through_rightbar else x_max
    meldung_scroller = MultisymbolScrollline(x_min, scrollx_msg_xmax, symtextoffset, fonttext, lighttextColor, meldungicons, bgcolor_t=matrixbgColor_t, initial_pretext=2, initial_posttext=10)

    # tmp
    if args.message:
        meldungs.append(Meldung(symbol="ad", text=args.message))

    pe_f = None
    joined = True

    # "volles" Beispiel in dm_depdata.py
    depfun_efa: type_depfns = {
        ("efa-main", True): [(getefadeps, [{'serverurl': efaserver,
                                            'timeout': servertimeout,
                                            'ifopt': ifopt,
                                            'limit': limit*args.limit_multiplier,
                                            'tz': tz,
                                            'ignore_infoTypes': ignore_infoTypes,
                                            'ignore_infoIDs': ignore_infoIDs,
                                            'content_for_short_titles': content_for_short_titles,
                                           },
                                           {'serverurl': efaserver_backup,
                                            'timeout': servertimeout,
                                            'ifopt': ifopt,
                                            'limit': limit*args.limit_multiplier,
                                            'tz': tz,
                                            'ignore_infoTypes': ignore_infoTypes,
                                            'ignore_infoIDs': ignore_infoIDs,
                                            'content_for_short_titles': content_for_short_titles,
                                           },
                                          ])
                            ],
        }

    depfun_efadb: type_depfns = {
        ("efa-notr", True): [(getefadeps, [{'serverurl': efaserver,
                                            'timeout': servertimeout,
                                            'ifopt': ifopt,
                                            'limit': limit*args.limit_multiplier,
                                            'tz': tz,
                                            'exclMOT': trainTMOTefa,
                                            'ignore_infoTypes': ignore_infoTypes,
                                            'ignore_infoIDs': ignore_infoIDs,
                                            'content_for_short_titles': content_for_short_titles,
                                           },
                                           {'serverurl': efaserver_backup,
                                            'timeout': servertimeout,
                                            'ifopt': ifopt,
                                            'limit': limit*args.limit_multiplier,
                                            'tz': tz,
                                            'exclMOT': trainTMOTefa,
                                            'ignore_infoTypes': ignore_infoTypes,
                                            'ignore_infoIDs': ignore_infoIDs,
                                            'content_for_short_titles': content_for_short_titles,
                                           },
                                          ])
                            ],
        ("dbre-tr", True): [(getdbrestdeps, [{'serverurl': dbrestserver,
                                               'timeout': servertimeout,
                                               'ibnr': dbrestibnr,
                                               'limit': limit*args.limit_multiplier,
                                               'inclMOT': trainMOT,
                                             },
                                             {'serverurl': dbrestserver_backup,
                                               'timeout': servertimeout,
                                               'ibnr': dbrestibnr,
                                               'limit': limit*args.limit_multiplier,
                                               'inclMOT': trainMOT,
                                             }
                                            ]),
                            (getefadeps, [{'serverurl': efaserver,
                                           'timeout': servertimeout,
                                           'ifopt': ifopt,
                                           'limit': limit*args.limit_multiplier,
                                           'tz': tz,
                                           'inclMOT': trainTMOTefa,
                                           'ignore_infoTypes': ignore_infoTypes,
                                           'ignore_infoIDs': ignore_infoIDs,
                                           'content_for_short_titles': content_for_short_titles,
                                          },
                                          {'serverurl': efaserver_backup,
                                           'timeout': servertimeout,
                                           'ifopt': ifopt,
                                           'limit': limit*args.limit_multiplier,
                                           'tz': tz,
                                           'inclMOT': trainTMOTefa,
                                           'ignore_infoTypes': ignore_infoTypes,
                                           'ignore_infoIDs': ignore_infoIDs,
                                           'content_for_short_titles': content_for_short_titles,
                                          }
                                         ])
                           ],
        }

    depfunctions = depfun_efadb if dbrestibnr else depfun_efa
    if d3d9id:
        depfnlist_d3d9: type_depfnlist = [(getd3d9msgdata, [{'serverurl': d3d9server, 'timeout': servertimeout, 'dfi_id': d3d9id}])]
        depfunctions.update({('d3d9-m+d', False): depfnlist_d3d9})

    logger.info(f"started loop with depfunctions {', '.join(x[0] for x in depfunctions.keys())}")
    while True:
        # time_measure = monotonic()
        canvas.Fill(*matrixbgColor_t) if matrixbgColor_t else canvas.Clear()
        if joined and not i % step:
            joined = False
            pe_f = pe.submit(getdeps,
                             depfunctions=depfunctions,
                             getdeps_timezone=tz,
                             getdeps_lines=limit-header,
                             getdeps_placelist=placelist,
                             getdeps_mincountdown=countdownlowerlimit,
                             getdeps_max_retries=maxkwaretries,
                             extramsg_messageexists=bool(args.message),  # whether a Meldung will *already* exist - currently only via args.message.
                             delaymsg_enable=delaymsg_enable,
                             delaymsg_mindelay=delaymsg_mindelay,
                             etermmsg_enable=etermmsg_enable,
                             etermmsg_only_visible=etermmsg_only_visible,
                             nodepmsg_enable=True,
                             nortmsg_limit=nortmsg_limit)

        if pe_f.done() and not joined:
            try:
                deps, meldungs, _add_data = pe_f.result()
            except Exception as e:
                if e.__class__ != GetdepsEndAll:
                    logger.exception("exception from getdeps")
                deps = []
                meldungs = [Meldung(symbol="warn", text="Fehler bei Datenabruf. Bitte Aushangfahrpläne beachten.")]
            else:
                if args.message:
                    meldungs.append(Meldung(symbol="ad", text=args.message))
                for di, dep in enumerate(deps):
                    for _mel in dep.messages:
                        if _mel not in meldungs and ((not _mel.efa) or (efamenabled and di < limit-header-1)):
                            meldungs.append(_mel)
                _brightness = _add_data.get("brightness")
                if _brightness is not None and _brightness != matrix.brightness:
                    matrix.brightness = _brightness
            finally:
                joined = True
                meldung_scroller.update(meldungs)

        blinkstep = i % 40 < 20
        blinkon = blinkstep or not blink
        if rightbar or header:
            currenttime = localtime()
        r = y_min + text_startr

        if rightbar:
            # x_min, y_min etc. are missing here
            rightbarfn(canvas, x_max+1+spacetr, 0, rightbarwidth, rightbarfont, rightbarcolor, i, step, currenttime, *rightbarargs)

        if header:
            stop_scroller.update(ppm_stop if stopsymbol else None, headername or (deps and deps[0].stopname) or "")
            stop_scroller.render(canvas, r)

            if not rightbar:
                graphics.DrawText(canvas, fonttext, scrollx_stop_xmax+1+header_spacest, r, rtnoColor, clockstr_tt(currenttime))

            r += lineheight

        for dep in deps[:(limit-bool(meldungs)-header)]:
            if linenum_drawbg:
                for y in range(r-linenumheight, r):
                    graphics.DrawLine(canvas, linenum_min, y, linenum_max, y, linebgColor)

            _lnfont = fontlinenum
            linenumstr = dep.disp_linenum
            linenumpx = textpx(_lnfont, linenumstr)
            _roff = 0
            if linenumpx > linenum_width:
                shownchars_normal = propscroll(fontlinenum, linenumstr, linenum_min, linenum_max)
                shownchars_small = propscroll(fontnum, linenumstr, linenum_min, linenum_max)
                _search = linenumpattern.search(linenumstr)
                if _search is not None:
                    linenumstr = _search.group(1)+_search.group(2)
                    shownchars_normal = propscroll(fontlinenum, linenumstr, linenum_min, linenum_max)
                    shownchars_small = propscroll(fontnum, linenumstr, linenum_min, linenum_max)
                    if shownchars_small < len(linenumstr):
                        linenumstr = _search.group(1)
                        shownchars_normal = propscroll(fontlinenum, linenumstr, linenum_min, linenum_max)
                        shownchars_small = propscroll(fontnum, linenumstr, linenum_min, linenum_max)
                if shownchars_small > shownchars_normal and not linenumstr[shownchars_small-1] in {'(', '/'}:
                    linenumstr = linenumstr[:shownchars_small]
                    _lnfont = fontnum
                    _roff = linenum_normalsmalloffset
                else:
                    linenumstr = linenumstr[:shownchars_normal]
                    _lnfont = fontlinenum
                linenumpx = textpx(_lnfont, linenumstr)
            graphics.DrawText(canvas, _lnfont, linenum_max - linenumpx + (linenumpx == linenum_width), r-_roff, linefgColor, linenumstr)

            color = rtnoColor
            if dep.realtime:
                if dep.delay >= mindelay or dep.cancelled:
                    color = rtlateColor
                elif dep.delay >= minslightdelay:
                    color = rtslightColor
                elif dep.delay < 0:
                    color = rtnegativeColor
                else:
                    color = rtColor

            direction_x = linenum_max + 1 + spaceld
            directionpixel = deptime_x_max - direction_x
            timeoffset = 0

            if dep.cancelled:
                drawppm_bottomright(canvas, ppm_ausfall, deptime_x_max, r, transp=True)
                timeoffset += ppm_ausfall.size[0]
            elif dep.disp_countdown > maxmin:
                timestr = clockstr_tt(dep.deptime.timetuple())
                timestrpx = textpx(fontcountdown, timestr)
                graphics.DrawText(canvas, fontcountdown, deptime_x_max - timestrpx + 1, r, color, timestr)
                timeoffset += timestrpx
            elif blinkon and dep.disp_countdown == 0 and zerobus:
                drawppm_bottomright(canvas, ppmmotcolordict[dep.mot][color], deptime_x_max, r, transp=True)
                timeoffset += ppmmotdict[dep.mot].size[0]
            elif dep.disp_countdown or blinkon:
                timestr = str(dep.disp_countdown)
                timestrpx = textpx(fontcountdown, timestr)
                graphics.DrawText(canvas, fontcountdown, deptime_x_max - timestrpx - ((ppm_whitemin.size[0]-1+minoffset) if mintext else -1), r, color, timestr)
                timeoffset += timestrpx
                if mintext:
                    drawppm_bottomright(canvas, ppmmincolordict[color], deptime_x_max, r, transp=True)
                    timeoffset += ppm_whitemin.size[0] + minoffset

            # extensible
            if dep.earlytermination:
                dirtextcolor = texthighlightColor
            else:
                dirtextcolor = textColor

            directionpixel -= (timeoffset + spacedt*bool(timeoffset))
            directionlimit = propscroll(fonttext, dep.disp_direction, direction_x, direction_x+directionpixel)
            graphics.DrawText(canvas, fonttext, direction_x, r, dirtextcolor, dep.disp_direction[:directionlimit])

            r += lineheight

        if meldungs:
            meldung_scroller.render(canvas, r)
            r += lineheight

        if progress:
            x_progress = int(x_pixels-1 - ((i % step)*((x_pixels-1)/step)))
            graphics.DrawLine(canvas, x_min, y_max, x_min+x_progress, y_max, barColor)

        if christmas:
            drawchristmas(canvas, x_min, x_max, y_min, y_max, i)

        if writeppm:
            canvas.ppm(ppmfile)

        canvas = matrix.SwapOnVSync(canvas)

        if gpiotest:
            inputs = matrix.AwaitInputChange(0)
            if inputs & (1 << 21):
                # check_output(["/sbin/shutdown", "now"])
                matrix.brightness = ((matrix.brightness - gpiotest_minb + 1) % (gpiotest_maxb - gpiotest_minb + 1)) + gpiotest_minb

        # _st = interval-monotonic()+time_measure
        # if _st > 0:
        #     sleep(_st)
        if interval > 0:
            sleep(interval)
        i += 1
Example #27
File: main.py Project: cgeopapa/dispike
    def edit_command(
        self,
        new_command: typing.Union[typing.List[DiscordCommand], DiscordCommand],
        command_id: int = None,
        bulk=False,
        guild_only=False,
        guild_id_passed=None,
    ) -> DiscordCommand:
        """Edits a command provided with a command_id and a valid new command.

        Args:
            new_command ([DiscordCommand, List[DiscordCommand]]): A valid DiscordCommand object (or a dict with proper syntax; if a dict is passed no verification will be made and Discord will return any syntax error)
            command_id (int): Command ID
            guild_only (bool, optional): whether to target a guild. Defaults to False.
            guild_id_passed (bool, optional): guild id if guild_only is set to True. Defaults to None.
            bulk (bool, optional): whether this action will be a bulk action.

        Returns:
            DiscordCommand: Returns the DiscordCommand object created. (Will return a DiscordCommand regardless of new_command)

        Raises:
            TypeError: Invalid types passed.
            DiscordAPIError: any Discord returned errors.
        """

        if not isinstance(new_command, (DiscordCommand, dict, list)):
            raise TypeError(
                "New command must be a DiscordCommand or a valid dict.")

        if guild_only:
            if not guild_id_passed:
                raise TypeError(
                    "You cannot have guild_only set to True and NOT pass any guild id."
                )
            if bulk:
                _url = f"/guilds/{guild_id_passed}/commands"
            else:
                _url = f"/guilds/{guild_id_passed}/commands/{command_id}"
        else:
            _url = "/commands"
        if bulk and isinstance(new_command, list):
            # dicts are passed through unverified, as the docstring notes
            _new_command = [
                command.dict() if isinstance(command, DiscordCommand) else command
                for command in new_command
            ]
            _selected_request_method = "PUT"
        else:
            _new_command = (new_command.dict() if isinstance(
                new_command, DiscordCommand) else new_command)
            _selected_request_method = "PATCH"
        try:
            _send_request = self._registrator._client.request(
                method=_selected_request_method,
                url=_url,
                headers=self._registrator.request_headers,
                json=_new_command,
            )
            if _send_request.status_code != 200:
                raise DiscordAPIError(_send_request.status_code,
                                      _send_request.text)

            if bulk:
                return [DiscordCommand(**x) for x in _send_request.json()]
            else:
                return DiscordCommand(**_send_request.json())
        except DiscordAPIError:
            logger.exception("Discord API Failure.")
            return False
        except Exception:
            logger.exception("Unknown exception returned")
            return False
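
The URL and HTTP-method selection above is the subtle part, so here it is isolated as a standalone sketch; select_edit_request and its parameters are illustrative names, not part of dispike:

from typing import Optional, Tuple

def select_edit_request(command_id: Optional[int],
                        bulk: bool,
                        guild_only: bool = False,
                        guild_id: Optional[int] = None) -> Tuple[str, str]:
    """Mirror edit_command's routing: bulk edits PUT the whole collection,
    single edits PATCH one command; guild commands live under /guilds/<id>."""
    if guild_only:
        if not guild_id:
            raise TypeError("guild_only=True requires a guild id.")
        base = f"/guilds/{guild_id}/commands"
        url = base if bulk else f"{base}/{command_id}"
    else:
        url = "/commands"
    return ("PUT" if bulk else "PATCH", url)

assert select_edit_request(1234, bulk=False) == ("PATCH", "/commands")
assert select_edit_request(1234, bulk=True, guild_only=True, guild_id=42) == (
    "PUT", "/guilds/42/commands")
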
Example #28
def log_out(request):
    try:
        logout(request)
    except Exception:
        logger.exception("Logout failed")  # original message: "注销异常" ("logout exception")
    return redirect(request.META['HTTP_REFERER'])
Example #29
            if isinstance(exception.original, exceptions.SkyblockError):
                logger.exception(exception)
                return await ctx.send("An unknown error occurred. Please report this to the devs.")
        traceback_lines = traceback.format_exception(type(exception), exception, exception.__traceback__)
        logger.exception("".join(traceback_lines))
        logger.exception(exception)

    async def on_slash_command_error(self, ctx, exception):
        
        if isinstance(exception, exceptions.NeverPlayedSkyblockError):
            return await ctx.send(content="This player never played Hypixel Skyblock.")
        if isinstance(exception, exceptions.BadNameError):
            return await ctx.send(content="This username does not exist in Minecraft.")
        if isinstance(exception, exceptions.ExternalAPIError):
            logger.exception(exception)
            return await ctx.send(content="There has been an error while requesting the data from the API! Please try again after waiting some time..", delete_after=12)
        if isinstance(exception, exceptions.SkyblockError):
            logger.exception(exception)
            return await ctx.send(content="An unknown error occurred. Please report this to the devs.")
        traceback_lines = traceback.format_exception(type(exception), exception, exception.__traceback__)
        logger.exception("".join(traceback_lines))
        logger.exception(exception)


if __name__ == "__main__":
    skybot = Skybot()
    try:
        skybot.run(skybot.config["token"])
    except discord.LoginFailure:
        logger.exception("Improper token in config.json")
Example #30
def init_routes(sanic_app, pages_path=None, ssl_enabled=False):  # pylint: disable=unused-variable
    """Innitiate all Ax routes"""
    del ssl_enabled
    try:
        sanic_app.static('/uploads', str(ax_misc.path('uploads')))
        sanic_app.static('/static', str(ax_misc.path('dist/ax/static')))
        sanic_app.static('/stats', str(ax_misc.path('dist/ax/stats.html')))
        sanic_app.static('/test_webpack',
                         str(ax_misc.path('dist/ax/test.html')))

        sanic_app.static('/editor.worker.js',
                         str(ax_misc.path('dist/ax/editor.worker.js')))
        sanic_app.static('/html.worker.js',
                         str(ax_misc.path('dist/ax/html.worker.js')))
        sanic_app.static('json.worker.js',
                         str(ax_misc.path('dist/ax/json.worker.js')))

        # Pages routes {
        pages_dist_path = str(ax_misc.path('dist/pages'))
        if pages_path:
            pages_dist_path = pages_path
        index_path = str(os.path.join(pages_dist_path, "index.html"))

        sanic_app.static('/pages/static',
                         str(ax_misc.path('dist/pages/static')))
        # TODO: what is this guid? Maybe it must be part of the build?

        pre_cache_file_name = None
        pages_dist_dir = ax_misc.path('dist/pages')
        file_list = os.listdir(pages_dist_dir)
        for name in file_list:
            if "precache-manifest." in name:
                pre_cache_file_name = name

        sanic_app.static(
            f'/pages/{pre_cache_file_name}',
            str(ax_misc.path(f'dist/pages/{pre_cache_file_name}')))  # pylint: disable=line-too-long
        sanic_app.static('/pages/service-worker.js',
                         str(ax_misc.path('dist/pages/service-worker.js')))
        sanic_app.static('/pages/manifest.json',
                         str(ax_misc.path('dist/pages/manifest.json')))
        sanic_app.static('/pages/robots.txt',
                         str(ax_misc.path('dist/pages/robots.txt')))

        sanic_app.static('/pages', index_path)

        @sanic_app.route('/pages/<path:path>')
        def pages_index(request, path=None):  # pylint: disable=unused-variable
            """  """
            del request, path
            return response.html(open(index_path).read())

        @sanic_app.route('/signin')
        def pages_index1(request, path=None):  # pylint: disable=unused-variable
            """  """
            del request, path
            return response.html(open(index_path).read())

        # Pages routes }

        # Add tus upload blueprint
        sanic_app.blueprint(tus_bp)
        # Add web-socket subscription server
        subscription_server = WsLibSubscriptionServer(ax_schema.schema)

        @sanic_app.route('api/signout', methods=['GET'])
        @inject_user()
        @ax_protected()
        async def signout(request, user=None):  # pylint: disable=unused-variable
            """ Delete all auth cookies and redirect to signin """

            user_guid = user.get('user_id', None) if user else None
            if user_guid:
                key = f'refresh_token_{user_guid}'
                await ax_cache.cache.delete(key)

            to_url = '/signin'
            if request.args.get('to_admin', False):
                to_url = '/admin/signin'

            resp = response.redirect(to_url)
            del resp.cookies['ax_auth']
            del resp.cookies['access_token']
            del resp.cookies['refresh_token']
            return resp

        @sanic_app.route(
            '/api/file/<form_guid>/<row_guid>/<field_guid>/<file_name>',
            methods=['GET'])
        @inject_user()
        @ax_protected()
        async def db_file_viewer(  # pylint: disable=unused-variable
                request, form_guid, row_guid, field_guid, file_name, user):  # pylint: disable=unused-variable
            """ Used to display files that are stored in database.
                Used in fields like AxImageCropDb"""
            current_user = user
            del request, form_guid, file_name
            with ax_model.scoped_session(
                    "routes.db_file_viewer") as db_session:
                safe_row_guid = str(uuid.UUID(str(row_guid)))
                ax_field = db_session.query(AxField).filter(
                    AxField.guid == uuid.UUID(field_guid)).first()

                # first we select only guid and axState to know, if user have
                # access
                row_result = await ax_dialects.dialect.select_one(
                    db_session=db_session,
                    form=ax_field.form,
                    fields_list=[],
                    row_guid=safe_row_guid)

                state_name = row_result[0]['axState']
                state_guid = await ax_auth.get_state_guid(
                    ax_form=ax_field.form, state_name=state_name)

                user_guid = current_user.get('user_id',
                                             None) if current_user else None
                user_is_admin = current_user.get(
                    'is_admin', False) if current_user else False
                allowed_field_dict = await ax_auth.get_allowed_fields_dict(
                    ax_form=ax_field.form,
                    user_guid=user_guid,
                    state_guid=state_guid)

                field_guid = str(ax_field.guid)
                if not user_is_admin:
                    if (field_guid not in allowed_field_dict
                            or allowed_field_dict[field_guid] == 0
                            or allowed_field_dict[field_guid] is None):
                        email = current_user.get('email', None)
                        msg = (f'Error in db_file_viewer. '
                               f'Not allowed for user [{email}]')
                        logger.error(msg)
                        return response.text("", status=403)

                field_value = await ax_dialects.dialect.select_field(
                    db_session=db_session,
                    form_db_name=ax_field.form.db_name,
                    field_db_name=ax_field.db_name,
                    row_guid=safe_row_guid)

                return response.raw(field_value,
                                    content_type='application/octet-stream')

        @sanic_app.route(
            '/api/file/<form_guid>/<row_guid>/<field_guid>/<file_guid>/<file_name>',  # pylint: disable=line-too-long
            methods=['GET'])
        @inject_user()
        @ax_protected()
        async def file_viewer(  # pylint: disable=unused-variable
                request, form_guid, row_guid, field_guid, file_guid, file_name,
                user=None):
            """ Used to display files uploaded and stored on disk.
                Displays temp files too. Used in all fields with upload"""
            del request
            current_user = user
            with ax_model.scoped_session(
                    "routes -> file_viewer") as db_session:
                # if row_guid is null -> display from /tmp without permissions
                if not row_guid or row_guid == 'null':
                    tmp_dir = os.path.join(ax_misc.tmp_root_dir, file_guid)
                    file_name = os.listdir(tmp_dir)[0]
                    temp_path = os.path.join(tmp_dir, file_name)
                    return await response.file(temp_path)

                # get AxForm with row values
                ax_form = db_session.query(AxForm).filter(
                    AxForm.guid == uuid.UUID(form_guid)).first()
                ax_form = await form_schema.set_form_values(
                    db_session=db_session,
                    ax_form=ax_form,
                    row_guid=row_guid,
                    current_user=current_user)

                # Get values from row, field
                field_values = None
                for field in ax_form.fields:
                    if field.guid == uuid.UUID(field_guid):
                        if field.value:
                            field_values = json.loads(field.value)

                            if isinstance(field_values, str):
                                try:
                                    field_values = json.loads(field_values)
                                except ValueError:
                                    pass

                # Find requested file in value
                the_file = None
                for file in field_values:
                    if file['guid'] == file_guid:
                        the_file = file

                if not the_file:
                    return response.text("", status=404)

                state_guid = await ax_auth.get_state_guid(
                    ax_form=ax_form, state_name=ax_form.current_state_name)

                user_guid = current_user.get('user_id',
                                             None) if current_user else None
                user_is_admin = current_user.get(
                    'is_admin', False) if current_user else False
                allowed_field_dict = await ax_auth.get_allowed_fields_dict(
                    ax_form=ax_form,
                    user_guid=user_guid,
                    state_guid=state_guid)

                if not user_is_admin:
                    if (field_guid not in allowed_field_dict
                            or allowed_field_dict[field_guid] == 0
                            or allowed_field_dict[field_guid] is None):
                        email = current_user['email']
                        msg = (f'Error in file_viewer. '
                               f'Not allowed for user [{email}]')
                        logger.error(msg)
                        return response.text("", status=403)

                # if file exists -> return file
                row_guid_str = str(uuid.UUID(row_guid))
                file_path = os.path.join(ax_misc.uploads_root_dir,
                                         'form_row_field_file', form_guid,
                                         row_guid_str, field_guid,
                                         the_file['guid'], the_file['name'])
                if not os.path.lexists(file_path):
                    return response.text("", status=404)
                return await response.file(file_path)

        @sanic_app.route('/admin/<path:path>')
        def index(request, path=None):  # pylint: disable=unused-variable
            """ This is MAIN ROUTE. (except other routes listed in this module).
                All requests are directed to Vue single page app. After that Vue
                handles routing."""
            del request, path
            absolute_path = ax_misc.path('dist/ax/index.html')
            return response.html(open(absolute_path).read())

        @sanic_app.route('/form/<path:path>')
        def index1(request, path=None):  # pylint: disable=unused-variable
            """ Copy of index. Sanic bug - https://github.com/huge-success/sanic/pull/1779"""
            del request, path
            absolute_path = ax_misc.path('dist/ax/index.html')
            return response.html(open(absolute_path).read())

        @sanic_app.route('/grid/<path:path>')
        def index2(request, path=None):  # pylint: disable=unused-variable
            """ Copy of index. Sanic bug - https://github.com/huge-success/sanic/pull/1779"""
            del request, path
            absolute_path = ax_misc.path('dist/ax/index.html')
            return response.html(open(absolute_path).read())

        @sanic_app.route('/admin/signin')
        def index3(request, path=None):  # pylint: disable=unused-variable
            """ Copy of index. Sanic bug - https://github.com/huge-success/sanic/pull/1779"""
            del request, path
            absolute_path = ax_misc.path('dist/ax/index.html')
            return response.html(open(absolute_path).read())

        @sanic_app.route('/')
        def handle_request(request):  # pylint: disable=unused-variable
            del request
            return response.redirect('/pages')

        @sanic_app.route('/api/draw_ax')
        async def draw_ax(request):  # pylint: disable=unused-variable
            """ Outputs bundle.js. Used when Ax web-components
                are inputed somewhere. Users can use this url for <script> tag
                """
            del request
            absolute_path = ax_misc.path('dist/ax/static/js/ax-bundle.js')
            return await response.file(
                absolute_path,
                headers={
                    'Content-Type': 'application/javascript; charset=utf-8'
                })

        @sanic_app.route('/draw_ax')
        async def draw_ax1(request):  # pylint: disable=unused-variable
            """ Outputs bundle.js. Used when Ax web-components
                are inputed somewhere. Users can use this url for <script> tag
                """
            del request
            absolute_path = ax_misc.path('dist/ax/static/js/ax-bundle.js')
            return await response.file(
                absolute_path,
                headers={
                    'Content-Type': 'application/javascript; charset=utf-8'
                })

        @sanic_app.websocket('/api/subscriptions', subprotocols=['graphql-ws'])
        async def subscriptions(request, web_socket):  # pylint: disable=unused-variable
            """Web socket route for graphql subscriptions"""
            del request
            try:
                # TODO: Why socket error exception occurs without internet
                await subscription_server.handle(web_socket)
                return web_socket
            except asyncio.CancelledError:
                pass
                # logger.exception('Socket error')

        @sanic_app.route('/api/ping', methods=['GET', 'HEAD'])
        async def ping(request):  # pylint: disable=unused-variable
            """ Ping function checks if Ax is running. Used with monit """
            del request
            from backend.schema import schema
            result = schema.execute("query { ping }")
            test_str = json.dumps(result.data, sort_keys=True, indent=4)
            return response.text(test_str)

        @sanic_app.route('/api/blog_rss')
        async def blog_rss(request):  # pylint: disable=unused-variable
            """ Fetches medium blog rss """
            del request
            feed = await ax_misc.fetch('https://medium.com/feed/@enf644')  # pylint: disable=line-too-long
            return response.text(feed)

        @sanic_app.route('/api/stackoverflow_rss')
        async def stackoverflow_rss(request):  # pylint: disable=unused-variable
            """ Fetches stackoverflow tag rss """
            del request
            feed = await ax_misc.fetch('https://stackexchange.com/feeds/tagsets/381786/ax-workflow?sort=active')  # pylint: disable=line-too-long
            return response.text(feed)

        @sanic_app.route('/api/home_welcome')
        async def home_welcome(request):  # pylint: disable=unused-variable
            """ Fetches welcome_free.md from ax-info """
            del request
            the_url = 'https://raw.githubusercontent.com/enf644/ax-info/master/welcome_free.md'
            if ax_auth.lise_is_active():
                the_url = 'https://raw.githubusercontent.com/enf644/ax-info/master/welcome.md'
            feed = await ax_misc.fetch(the_url)  # pylint: disable=line-too-long
            html = markdown2.markdown(feed)
            return response.text(html)

        @sanic_app.route('/api/marketplace_featured')
        async def marketplace_featured(request):  # pylint: disable=unused-variable
            """ Fetches featured apps json """
            del request
            feed = await ax_misc.fetch('https://raw.githubusercontent.com/enf644/ax-info/master/featured_apps.json')  # pylint: disable=line-too-long
            return response.text(feed)

        @sanic_app.route('/api/marketplace_all')
        async def marketplace_apps(request):  # pylint: disable=unused-variable
            """ Fetches all apps json """
            del request
            feed = await ax_misc.fetch('https://raw.githubusercontent.com/enf644/ax-info/master/apps.json')  # pylint: disable=line-too-long
            return response.text(feed)

        @sanic_app.route('/api/test')
        async def test(request):  # pylint: disable=unused-variable
            """Test function"""
            del request

            ret_str = await ax_migration.send_stats()

            # data = {
            #     "host": "hostHost",
            #     "usersNum": 10
            # }
            # value_str = json.dumps(data).replace('"', '\\"')
            # query_str = (
            #     "    mutation{"
            #     "        doAction("
            #     "            formDbName: \"AxUsageStats\""
            #     "            actionDbName: \"newStats\""
            #     f"            values: \"{value_str}\""
            #     "        ) {"
            #     "            ok"
            #     "        }"
            #     "    }"
            # )

            # json_data = {'query': query_str}
            # ret_str = await ax_misc.post_json(
            #     'http://127.0.0.1:8080/api/graphql', json_data)

            # ret_str = ax_model.engine.pool.status()
            # this.test_schema = 'IT WORKS'
            # ax_pubsub.publisher.publish(
            #     aiopubsub.Key('dummy_test'), this.test_schema)
            return response.text(ret_str)

        @sanic_app.route('/api/set')
        async def cache_set(request):  # pylint: disable=unused-variable
            """Cache Test function"""
            del request
            obj = ['one', 'two', 'three']
            await ax_cache.cache.set('user_list', obj)
            return response.text('Cache SET' + str(obj))

        @sanic_app.route('/api/get')
        async def cache_get(request):  # pylint: disable=unused-variable
            """Cache Test function"""
            del request
            obj = await ax_cache.cache.get('user_list')
            ret_str = 'READ cache == ' + \
                str(obj[0].username + ' - ' + os.environ['AX_VERSION'])
            return response.text(ret_str)

    except Exception:
        logger.exception('Error initiating routes.')
        raise
Example #31
    def msg_received_callback(self, channel, method_frame, header_frame, body):
        """A callback method that runs when a RabbitMQ message is received.

        Here we parse the message, spin up an analyzer process, and report the
        metadata back to the Airtime web application (or report an error).
        """
        logger.info(
            f" - Received '{body}' on routing_key '{method_frame.routing_key}'"
        )

        # Declare all variables here so they exist in the exception handlers below, no matter what.
        audio_file_path = ""
        # final_file_path = ""
        import_directory = ""
        original_filename = ""
        callback_url = ""
        api_key = ""
        file_prefix = ""
        """ Spin up a worker process. We use the multiprocessing module and multiprocessing.Queue
            to pass objects between the processes so that if the analyzer process crashes, it does not
            take down the rest of the daemon and we NACK that message so that it doesn't get
            propagated to other airtime_analyzer daemons (eg. running on other servers).
            We avoid cascading failure this way.
        """
        try:
            try:
                body = body.decode()
            except (UnicodeDecodeError, AttributeError):
                pass
            msg_dict = json.loads(body)
            api_key = msg_dict["api_key"]
            callback_url = msg_dict["callback_url"]

            audio_file_path = msg_dict["tmp_file_path"]
            import_directory = msg_dict["import_directory"]
            original_filename = msg_dict["original_filename"]
            file_prefix = msg_dict["file_prefix"]
            storage_backend = msg_dict["storage_backend"]

            audio_metadata = MessageListener.spawn_analyzer_process(
                audio_file_path,
                import_directory,
                original_filename,
                storage_backend,
                file_prefix,
            )
            StatusReporter.report_success_to_callback_url(
                callback_url, api_key, audio_metadata)

        except KeyError:
            # A field in msg_dict that we needed was missing (eg. audio_file_path)
            logger.exception(
                "A mandatory airtime_analyzer message field was missing from the message."
            )
            # See the huge comment about NACK below.
            channel.basic_nack(
                delivery_tag=method_frame.delivery_tag,
                multiple=False,
                requeue=False)  # Important that it doesn't requeue the message

        except Exception as e:
            logger.exception(e)
            """ If ANY exception happens while processing a file, we're going to NACK to the
                messaging server and tell it to remove the message from the queue.
                (NACK is a negative acknowledgement. We could use ACK instead, but this might come
                 in handy in the future.)
                Exceptions in this context are unexpected, unhandled errors. We try to recover
                from as many errors as possible in AnalyzerPipeline, but we're safeguarding ourselves
                here from any catastrophic or genuinely unexpected errors:
            """
            channel.basic_nack(
                delivery_tag=method_frame.delivery_tag,
                multiple=False,
                requeue=False)  # Important that it doesn't requeue the message

            #
            # TODO: If the JSON was invalid or the web server is down,
            #       then don't report that failure to the REST API
            # TODO: Catch exceptions from this HTTP request too:
            if (
                    callback_url
            ):  # If we got an invalid message, there might be no callback_url in the JSON
                # Report this as a failed upload to the File Upload REST API.
                StatusReporter.report_failure_to_callback_url(
                    callback_url,
                    api_key,
                    import_status=2,
                    reason="An error occurred while importing this file",
                )

        else:
            # ACK at the very end, after the message has been successfully processed.
            # If we don't ack, then RabbitMQ will redeliver the message in the future.
            channel.basic_ack(delivery_tag=method_frame.delivery_tag)
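
The block comments above describe the isolation pattern: run the analysis in a child process and hand the metadata back over a multiprocessing.Queue, so an analyzer crash NACKs a single message instead of killing the daemon. A minimal sketch of that pattern — analyze is a hypothetical stand-in for the real AnalyzerPipeline entry point:

import multiprocessing

def analyze(queue: multiprocessing.Queue, file_path: str) -> None:
    """Child-process body; placeholder for the real analyzer pipeline."""
    queue.put({"path": file_path, "ok": True})

def spawn_analyzer_process(file_path: str) -> dict:
    """Run the analyzer in its own process so a crash cannot take the
    daemon down with it; the caller decides whether to ACK or NACK."""
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=analyze, args=(queue, file_path))
    proc.start()
    proc.join()
    if proc.exitcode != 0:
        raise RuntimeError(f"analyzer exited with code {proc.exitcode}")
    return queue.get(timeout=5)

if __name__ == "__main__":
    print(spawn_analyzer_process("/tmp/example.mp3"))
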