    def __init__(self, conn, id = None, reminder_item_etree = None):
        """
        Reminder-Item

        :param conn: Connection-Object
        """

        Bunch.__init__(self)

        self.conn = conn

        self.id = id  # integer
        self.created = None  # datetime
        self.article_id = None
        self.reminder_id = None  # integer
        self.position = None  # integer
        self.unit = None
        self.quantity = None  # float
        self.unit_price = None  # float
        self.title = None
        self.description = None
        self.total = None  # float

        if reminder_item_etree is not None:
            self.load_from_etree(reminder_item_etree)
        elif id is not None:
            self.load()
Example #2
def load_data_from(file_name, mode=None, external_params_name=None):
    """We assume that 'external_params' is a a valid json if passed
    """

    external_params = BuiltIn().\
        get_variable_value('${{{name}}}'.format(name=external_params_name))

    if not os.path.exists(file_name):
        file_name = os.path.join(os.path.dirname(__file__), 'data', file_name)
    with open(file_name) as file_obj:
        if file_name.endswith('.json'):
            file_data = Munch.fromDict(load(file_obj))
        elif file_name.endswith('.yaml'):
            file_data = Munch.fromYAML(file_obj)
    if mode == 'brokers':
        default = file_data.pop('Default')
        brokers = {}
        for k, v in file_data.items():
            brokers[k] = merge_dicts(default, v)
        file_data = brokers

    try:
        ext_params_munch \
            = Munch.fromDict(loads(external_params)) \
            if external_params else Munch()
    except ValueError:
        raise ValueError(
            'Value {param} of command line parameter {name} is invalid'.
            format(name=external_params_name, param=str(external_params))
        )

    return merge_dicts(file_data, ext_params_munch)
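The helper `merge_dicts` used above is not shown in this snippet; a minimal sketch of one plausible implementation (a shallow merge in which values from the second argument win, returned as a Munch for attribute access) is:

from munch import Munch

def merge_dicts(base, override):
    # Hypothetical helper, not part of the original example: combine two
    # mappings, letting keys from `override` replace those from `base`.
    merged = dict(base)
    merged.update(override)
    return Munch.fromDict(merged)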
Example #3
File: actions.py Project: seocam/copr
    def run(self):
        """ Handle action (other then builds) - like rename or delete of project """
        result = Munch()
        result.id = self.data["id"]

        action_type = self.data["action_type"]

        if action_type == ActionType.DELETE:
            if self.data["object_type"] == "copr":
                self.handle_delete_copr_project()
            elif self.data["object_type"] == "build":
                self.handle_delete_build()

            result.result = ActionResult.SUCCESS

        elif action_type == ActionType.LEGAL_FLAG:
            self.handle_legal_flag()

        elif action_type == ActionType.RENAME:
            self.handle_rename(result)

        elif action_type == ActionType.CREATEREPO:
            self.handle_createrepo(result)

        elif action_type == ActionType.UPDATE_COMPS:
            self.handle_comps_update(result)

        if "result" in result:
            if result.result == ActionResult.SUCCESS and \
                    not getattr(result, "job_ended_on", None):
                result.job_ended_on = time.time()

            self.frontend_client.update({"actions": [result]})
Example #4
    def __init__(self, conn, id = None, article_etree = None):
        """
        Article

        :param conn: Connection-Object
        """

        Bunch.__init__(self)

        self.conn = conn
        self.content_language = None

        self.id = id  # integer
        self.created = None  # datetime
        self.article_number = None
        self.number = None  # integer
        self.number_pre = None
        self.title = None
        self.description = None
        self.sales_price = None  # float
        self.sales_price2 = None  # float
        self.sales_price3 = None  # float
        self.sales_price4 = None  # float
        self.sales_price5 = None  # float
        self.currency_code = None
        self.unit_id = None  # integer
        self.tax_id = None  # integer
        self.purchase_price = None  # float
        self.purchase_price_net_gross = None
        self.supplier_id = None  # integer

        if article_etree is not None:
            self.load_from_etree(article_etree)
        elif id is not None:
            self.load()
Example #5
    def execute_with_lock(self, executable: str, lock: ConnectedConsulLockInformation, *, capture_stdout: bool=False,
                          capture_stderr: bool=False) -> Tuple[int, Optional[bytes], Optional[bytes]]:
        """
        TODO
        :param executable:
        :param lock:
        :param capture_stdout:
        :param capture_stderr:
        :return:
        """
        assert lock is not None
        redirects = Munch(stdout=subprocess.PIPE if capture_stdout else sys.stdout,
                         stderr=subprocess.PIPE if capture_stderr else sys.stderr)

        # Patch for when sys.stdout and sys.stderr have been reassigned (e.g. in IDE test runners)
        non_realtime_redirects: Dict[str, StringIO] = {}
        for name, redirect in redirects.items():
            if isinstance(redirect, StringIO):
                logger.warning(f"Cannot capture {name} in real-time as `sys.{name}` does not have a fileno")
                non_realtime_redirects[name] = redirect
                redirects[name] = subprocess.PIPE

        outputs = Munch(stdout=None, stderr=None)
        with lock:
            process = subprocess.Popen(executable, shell=True, stdout=redirects.stdout, stderr=redirects.stderr)
            outputs.stdout, outputs.stderr = process.communicate()

        # Second part of redirect reassignment patch
        for name, original_redirect in non_realtime_redirects.items():
            captured = outputs[name]
            getattr(sys, name).write(captured.decode("utf-8"))

        return process.returncode, \
               outputs.stdout if capture_stdout else None, \
               outputs.stderr if capture_stderr else None
Example #6
    def __init__(self, conn, id = None, reminder_etree = None):
        """
        Reminder

        :param conn: Connection-Object
        """

        Bunch.__init__(self)

        self.conn = conn
        self.content_language = None

        self.id = id  # integer
        self.created = None  # datetime
        self.status = None
        self.invoice_id = None  # integer
        self.contact_id = None  # integer
        self.reminder_text_id = None  # integer
        self.reminder_level = None  # integer
        self.reminder_level_name = None
        self.date = None  # date
        self.label = None
        self.subject = None
        self.intro = None
        self.note = None
        self.due_date = None  # date
        self.total_gross = None  # float
        self.is_old = None  # If this flag is set to 1, a more recent reminder exists.

        if reminder_etree is not None:
            self.load_from_etree(reminder_etree)
        elif id is not None:
            self.load()
Example #7
def test_explicit_json(
        empty_proxy: PaywalledProxy,
        api_endpoint_address: str,
        client: Client,
        wait_for_blocks
):
    proxy = empty_proxy
    endpoint_url = "http://" + api_endpoint_address

    proxy.add_paywalled_resource(JSONResource, '/resource', 3)

    # test GET
    response = requests.get(endpoint_url + '/resource')
    assert response.status_code == 402
    headers = HTTPHeaders.deserialize(response.headers)
    assert int(headers.price) == 3

    channel = client.get_suitable_channel(headers.receiver_address, int(headers.price) * 4)
    wait_for_blocks(6)
    channel.update_balance(int(headers.price))

    headers = Munch()
    headers.balance = str(channel.balance)
    headers.balance_signature = encode_hex(channel.balance_sig)
    headers.sender_address = channel.sender
    headers.open_block = str(channel.block)
    headers = HTTPHeaders.serialize(headers)

    response = requests.get(endpoint_url + '/resource', headers=headers)
    assert response.status_code == 200
    # If headers don't merge properly, this results in 'application/json,application/json'.
    assert response.headers['Content-Type'] == 'application/json'
    assert response.json() == {'GET': 1}
Example #8
class NodeAPIResult(object):

    """Generic representation of a result from a call to the export API.
    """

    def __init__(self, data):
        if "nodelist" in data:
            self.nodelist = data["nodelist"]
            del data["nodelist"]
        if "result_shortlist" in data:
            self.shortlist = APIShortlist(data["result_shortlist"])
            del data["result_shortlist"]
        self.info = Munch(data)

    def __getitem__(self, key):
        return self.info[key]

    def __getattr__(self, name):
        return self.info.__getattr__(name)

    def iternode(self):
        return imap(APINode.from_dict, (l[0] for l in self.nodelist))

    def print_info(self):
        print(self.info.toYAML())
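A short illustrative use of the class above (the data shape is an assumption); attribute and item access are both delegated to the wrapped Munch:

result = NodeAPIResult({"nodelist": [], "status": "ok"})
assert result.status == "ok"        # via __getattr__ -> self.info
assert result["status"] == "ok"     # via __getitem__ -> self.info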
Example #9
def test_static_price(
        empty_proxy: PaywalledProxy,
        api_endpoint_address: str,
        client: Client,
        wait_for_blocks
):
    proxy = empty_proxy
    endpoint_url = "http://" + api_endpoint_address

    proxy.add_paywalled_resource(StaticPriceResource, '/resource', 3)

    # test GET
    response = requests.get(endpoint_url + '/resource')
    assert response.status_code == 402
    headers = HTTPHeaders.deserialize(response.headers)
    assert int(headers.price) == 3

    channel = client.get_suitable_channel(headers.receiver_address, int(headers.price) * 4)
    wait_for_blocks(6)
    channel.update_balance(int(headers.price))

    headers = Munch()
    headers.balance = str(channel.balance)
    headers.balance_signature = encode_hex(channel.balance_sig)
    headers.sender_address = channel.sender
    headers.open_block = str(channel.block)
    headers = HTTPHeaders.serialize(headers)

    response = requests.get(endpoint_url + '/resource', headers=headers)
    assert response.status_code == 200
    assert response.text.strip() == 'GET'

    assert_method(requests.post, endpoint_url + '/resource', headers, channel, 'POST')
    assert_method(requests.put, endpoint_url + '/resource', headers, channel, 'PUT')
    assert_method(requests.delete, endpoint_url + '/resource', headers, channel, 'DEL')
Example #10
def _get_arguments(argv, environ):
    from .__version__ import __version__
    from docopt import docopt
    from munch import Munch

    project_default = "[default: {}]".format(environ["JISSUE_PROJECT"]) if "JISSUE_PROJECT" in environ else ""
    version_default = "[default: {}]".format(environ["JISSUE_VERSION"]) if "JISSUE_VERSION" in environ else ""
    component_default = "[default: {}]".format(environ["JISSUE_COMPONENT"]) if "JISSUE_COMPONENT" in environ else ""
    issue_default = "[default: {}]".format(environ["JISSUE_ISSUE"]) if "JISSUE_ISSUE" in environ else ""
    doc_with_defaults = __doc__.format(
        project_default=project_default,
        version_default=version_default,
        component_default=component_default,
        issue_default=issue_default,
        issue="[<issue>]" if issue_default else "<issue>",
        project="[<project>]" if project_default else "<project>",
    )
    arguments = Munch(docopt(doc_with_defaults, argv=argv, help=True, version=__version__))
    if environ.get("JISSUE_PROJECT") and not arguments.get("<project>"):
        arguments["<project>"] = environ["JISSUE_PROJECT"]
    if environ.get("JISSUE_VERSION") and not arguments.get("--fix-version"):
        arguments["--fix-version"] = environ["JISSUE_VERSION"]
    if environ.get("JISSUE_COMPONENT") and not arguments.get("<component>"):
        arguments["<component>"] = environ["JISSUE_COMPONENT"]
    if environ.get("JISSUE_ISSUE") and not arguments.get("<issue>"):
        arguments["<issue>"] = environ["JISSUE_ISSUE"]
    return arguments
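Example #11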
    def __init__(self, conn, id = None, invoice_item_etree = None):
        """
        Invoice-Item

        :param conn: Connection-Object
        """

        Bunch.__init__(self)

        self.conn = conn

        self.id = id  # integer
        self.article_id = None
        self.invoice_id = None  # integer
        self.position = None  # integer
        self.unit = None
        self.quantity = None  # float
        self.unit_price = None  # float
        self.tax_name = None
        self.tax_rate = None  # float
        self.title = None
        self.description = None
        self.total_gross = None  # float
        self.total_net = None  # float
        self.reduction = None
        self.total_gross_unreduced = None  # float
        self.total_net_unreduced = None  # float

        if invoice_item_etree is not None:
            self.load_from_etree(invoice_item_etree)
        elif id is not None:
            self.load()
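Example #12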
    def __init__(self, conn, id = None, reminder_text_etree = None):
        """
        ReminderText

        :param conn: Connection-Object
        """

        Bunch.__init__(self)

        self.conn = conn
        self.content_language = None

        self.id = id  # Integer
        self.sorting = None  # Integer
        self.name = None
        self.subject = None
        self.header = None
        self.footer = None
        self.charge_name = None
        self.charge_description = None
        self.charge_amount = None  # Float

        if reminder_text_etree is not None:
            self.load_from_etree(reminder_text_etree)
        elif id is not None:
            self.load()
Example #13
    def __init__(self, conn, id = None, contact_etree = None):
        """
        Contact

        :param conn: Connection-Object
        """

        Bunch.__init__(self)

        self.conn = conn

        self.id = id  # integer
        self.created = None  # datetime
        self.client_id = None  # integer
        self.label = None
        self.name = None
        self.street = None
        self.zip = None
        self.city = None
        self.state = None
        self.country_code = None
        self.first_name = None
        self.last_name = None
        self.salutation = None
        self.phone = None
        self.fax = None
        self.mobile = None
        self.email = None
        self.www = None

        if contact_etree is not None:
            self.load_from_etree(contact_etree)
        elif id is not None:
            self.load()
Example #14
def test_copy():
    m = Munch(urmom=Munch(sez=Munch(what='what')))
    c = m.copy()
    assert c is not m
    assert c.urmom is not m.urmom
    assert c.urmom.sez is not m.urmom.sez
    assert c.urmom.sez.what == 'what'
    assert c == m
Example #15
def load_data_from_file(file_name):
    if not os.path.exists(file_name):
        file_name = os.path.join(os.path.dirname(__file__), file_name)
    with open(file_name) as file_obj:
        if file_name.endswith(".json"):
            return Munch.fromDict(load(file_obj))
        elif file_name.endswith(".yaml"):
            return Munch.fromYAML(file_obj)
Example #16
def test_setattr():
    b = Munch(foo="bar", this_is="useful when subclassing")
    assert hasattr(b.values, "__call__")

    b.values = "uh oh"
    assert b.values == "uh oh"

    with pytest.raises(KeyError):
        b["values"]
Example #17
def test_setattr():
    b = Munch(foo='bar', this_is='useful when subclassing')
    assert hasattr(b.values, '__call__')

    b.values = 'uh oh'
    assert b.values == 'uh oh'

    with pytest.raises(KeyError):
        b['values']
Example #18
    def identify(self, environ):
        '''Extract information to identify a user

        Retrieve either a username and password or a session_id that can be
        passed on to FAS to authenticate the user.
        '''
        log.info('in identify()')

        # friendlyform compat
        if 'repoze.who.logins' not in environ:
            environ['repoze.who.logins'] = 0

        req = webob.Request(environ, charset='utf-8')
        cookie = req.cookies.get(self.session_cookie)

        # This is compatible with TG1 and it gives us a way to authenticate
        # a user without making two requests
        query = req.GET
        form = Munch(req.POST)
        form.update(query)
        if form.get('login', None) == 'Login' and \
                'user_name' in form and \
                'password' in form:
            identity = {
                'login': form['user_name'],
                'password': form['password']
            }
            keys = ('login', 'password', 'user_name')
            for k in keys:
                if k in req.GET:
                    del req.GET[k]
                if k in req.POST:
                    del req.POST[k]
            return identity

        if cookie is None:
            return None

        log.info('Request identify for cookie %(cookie)s' %
                 {'cookie': to_bytes(cookie)})
        try:
            user_data = self._retrieve_user_info(
                environ,
                auth_params={'session_id': cookie})
        except Exception as e:  # pylint:disable-msg=W0703
            # For any exceptions, returning None means we failed to identify
            log.warning(e)
            return None

        if not user_data:
            return None

        # Preauthenticated
        identity = {'repoze.who.userid': user_data[1]['username'],
                    'login': user_data[1]['username'],
                    'password': user_data[1]['password']}
        return identity
Example #19
        def __init__(self, code, message, data):
            """
            :param code: Error code
            :param message: Error message
            :param data: Additional error information
            """

            Bunch.__init__(self)
            self.code = code
            self.message = message
            self.data = data
Example #20
def group_med_line(x, y, **kwargs):
    opts = Munch(kwargs)

    y.index = x.values
    data = pd.DataFrame(y)
    meds = dict(data.reset_index().groupby('index').agg(np.median).iloc[:,0])

    if 'colors' not in opts.keys():
        opts.colors = {name: 'k' for name in meds.keys()}

    for name, val in meds.items():
        plt.axhline(y=val, linewidth=2, color=opts.colors[name], ls='solid', label=name, alpha=1)
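An illustrative direct call of the function above (the data values are made up); it is presumably meant to be mapped over grouped data, drawing one horizontal median line per group:

import pandas as pd
import matplotlib.pyplot as plt

groups = pd.Series(["a", "a", "a", "b", "b", "b"])
values = pd.Series([1.0, 2.0, 3.0, 10.0, 20.0, 30.0])
# Draws y=2.0 in red for group "a" and y=20.0 in blue for group "b".
group_med_line(groups, values, colors={"a": "red", "b": "blue"})
plt.show()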
Example #21
    def __setattr__(self, k, v):
        """Recursive.

        >>> x=AutoBunch()
        >>> setattr(x, 'mega.name', 'xy')

        """
        k2, _, k3 = k.partition('.')
        if k3:
            self.__getattr__(k2).__setattr__(k3, v)
        else:
            Munch.__setattr__(self, k, v)
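The recursion above relies on `AutoBunch.__getattr__` creating missing intermediate nodes; that method is not shown here, but a minimal companion sketch on the same class could look like:

    def __getattr__(self, k):
        # Hypothetical companion method (not in the original snippet): return
        # the existing value, or create a nested AutoBunch so that
        # setattr(x, 'mega.name', 'xy') can build x.mega on the fly.
        try:
            return Munch.__getattr__(self, k)
        except AttributeError:
            child = AutoBunch()
            self[k] = child
            return child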
Example #22
def write_to_couchdb(tender_data):
    # Write the tender to CouchDB
    tender = client.get_tender(tender_data.get('id')).data
    tender['_id'] = tender['id']

    # If the tender already exists, copy its revision
    if db.doc_exist(tender['_id']):
        tender_from_couch = Munch(db.open_doc(tender['_id']))
        tender['_rev'] = tender_from_couch.get('_rev')
        db.save_doc(tender)
    # If not, create it in the database
    else:
        db.save_doc(tender)
Example #23
def _get_arguments(argv, environ):
    from .__version__ import __version__
    from docopt import docopt
    from munch import Munch
    project_default = "[default: {}]".format(environ["JISSUE_PROJECT"]) if "JISSUE_PROJECT" in environ else ""
    version_default = "[default: {}]".format(environ["JISSUE_VERSION"]) if "JISSUE_VERSION" in environ else ""
    doc_with_defaults = __doc__.format(project_default=project_default, version_default=version_default,
                                       project="[--project=PROJECT]" if project_default else "--project=PROJECT",
                                       version="[--release=RELEASE]" if version_default else "--release=RELEASE")
    arguments = Munch(docopt(doc_with_defaults, argv=argv, help=True, version=__version__))
    if environ.get("JISSUE_PROJECT") and not arguments.get("--project"):
        arguments["--project"] = environ["JISSUE_PROJECT"]
    if environ.get("JISSUE_VERSION") and not arguments.get("--release"):
        arguments["--release"] = environ["JISSUE_VERSION"]
    return arguments
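Example #24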
def update_configs(directory, to_update=None):
    """Collect, combine, and return all *.yaml files in `directory`."""
    confs = Path(directory).glob('*.yaml')

    confs = {p.stem.upper(): p for p in confs}

    if to_update is None:
        to_update = Munch()


    for name, conf in confs.items():
        c = process_config(config=conf)
        to_update.update(Munch({name: c}))

    return to_update
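`process_config` is not defined in this snippet; a minimal stand-in sketch, under the assumption that it simply loads a single YAML file into a Munch, could be:

import yaml
from munch import Munch

def process_config(config):
    # Hypothetical stand-in, not the project's actual implementation:
    # read one YAML file and wrap the result for attribute-style access.
    with open(config) as fh:
        return Munch.fromDict(yaml.safe_load(fh) or {})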
Example #25
def test_method_price(
        empty_proxy: PaywalledProxy,
        api_endpoint_address: str,
        client: Client,
        wait_for_blocks
):
    proxy = empty_proxy
    endpoint_url = "http://" + api_endpoint_address

    proxy.add_paywalled_resource(
        DynamicMethodResource,
        '/resource',
        resource_class_args=(42,),
        resource_class_kwargs={'bar': 9814072356})

    # test GET
    response = requests.get(endpoint_url + '/resource')
    assert response.status_code == 402
    headers = HTTPHeaders.deserialize(response.headers)
    assert int(headers.price) == 1

    channel = client.get_suitable_channel(headers.receiver_address, 1 + 2 + 3 + 4)
    wait_for_blocks(6)
    channel.update_balance(int(headers.price))

    headers = Munch()
    headers.balance = str(channel.balance)
    headers.balance_signature = encode_hex(channel.balance_sig)
    headers.sender_address = channel.sender
    headers.open_block = str(channel.block)
    headers = HTTPHeaders.serialize(headers)

    response = requests.get(endpoint_url + '/resource', headers=headers)
    assert response.status_code == 200
    assert response.text.strip() == 'GET'

    assert_method(requests.post,
                  endpoint_url + '/resource',
                  headers, channel, 'POST',
                  expected_price=2)
    assert_method(requests.put,
                  endpoint_url + '/resource',
                  headers, channel, 'PUT',
                  expected_price=3)
    assert_method(requests.delete,
                  endpoint_url + '/resource',
                  headers, channel, 'DEL',
                  expected_price=4)
Example #26
def test_contains():
    b = Munch(ponies="are pretty!")
    assert "ponies" in b
    assert ("foo" in b) is False

    b["foo"] = 42
    assert "foo" in b

    b.hello = "hai"
    assert "hello" in b

    b[None] = 123
    assert None in b

    b[False] = 456
    assert False in b
Example #27
def test_contains():
    b = Munch(ponies='are pretty!')
    assert 'ponies' in b
    assert ('foo' in b) is False

    b['foo'] = 42
    assert 'foo' in b

    b.hello = 'hai'
    assert 'hello' in b

    b[None] = 123
    assert None in b

    b[False] = 456
    assert False in b
Example #28
    def _check_session(self):
        if 'FLASK_FAS_OPENID_USER' not in flask.session \
                or flask.session['FLASK_FAS_OPENID_USER'] is None:
            flask.g.fas_user = None
        else:
            user = flask.session['FLASK_FAS_OPENID_USER']
            # Add approved_memberships to provide backwards compatibility
            # New applications should only use g.fas_user.groups
            user['approved_memberships'] = []
            for group in user['groups']:
                membership = dict()
                membership['name'] = group
                user['approved_memberships'].append(Munch.fromDict(membership))
            flask.g.fas_user = Munch.fromDict(user)
            flask.g.fas_user.groups = frozenset(flask.g.fas_user.groups)
        flask.g.fas_session_id = 0
Example #29
def load_initial_data_from(file_name):
    if not os.path.exists(file_name):
        file_name = os.path.join(os.path.dirname(__file__), 'data/{}'.format(file_name))
    with open(file_name) as file_obj:
        if file_name.endswith(".json"):
            return Munch.fromDict(load(file_obj))
        elif file_name.endswith(".yaml"):
            return fromYAML(file_obj)
Example #30
    def __init__(self, data):
        if "nodelist" in data:
            self.nodelist = data["nodelist"]
            del data["nodelist"]
        if "result_shortlist" in data:
            self.shortlist = APIShortlist(data["result_shortlist"])
            del data["result_shortlist"]
        self.info = Munch(data)
Example #31
    def Record(cls, **kwargs) -> Munch:
        return Munch(model=cls, **kwargs)
Example #32
def test_verify_password_is_true_for_empty_pw_with_insecure_mode():
    login = Login(username='******', password='')
    login.user = Munch(password='******')
    assert login.verify_password(True) is True
Example #33
                _ys, _xs = [arr.tolist() for arr in poly.exterior.xy]
                _xs = [a for a, b, c, d, e, f, g, h in grouper(_xs, 8)]
                _ys = [a for a, b, c, d, e, f, g, h in grouper(_ys, 8)]
>>>>>>> Stashed changes
                xs.extend(_xs)
                xs.extend([_xs[0], None])
                ys.extend(_ys)
                ys.extend([_ys[0], None])

                centroid.append(list(reversed(list(poly.centroid.coords[0]))))
    except Exception as e:
        print(f"Centers: {centers}\nRadii: {radii}")
        print(e)
        return None

    return Munch({"hulls": hull, "xs": xs, "ys": ys, "centroids": centroid})


<<<<<<< Updated upstream
def diag2poly(p1, p2):
    points = [p1, (p1[0], p2[1]), p2, (p2[0], p1[1])]
    return points


async def search_for_place_async(place_name, location=None, radius=300):
    if location is None:
        center = None
    elif location in coords:
        center = coords[location][0]
Example #34
class TestBuilder(object):
    BUILDER_BUILDROOT_PKGS = []
    BUILDER_CHROOT = "fedora-20-i386"
    BUILDER_TIMEOUT = 1024
    BUILDER_HOSTNAME = "example.com"
    BUILDER_USER = "******"
    BUILDER_REMOTE_BASEDIR = "/tmp/copr-backend-test"
    BUILDER_REMOTE_TMPDIR = "/tmp/copr-backend-test-tmp"
    BUILDER_PKG_NAME = "foovar"
    BUILDER_PKG_BASE = "foovar-2.41.f21"
    BUILDER_PKG_VERSION = "2.41.f21"
    BUILDER_PKG = "http://example.com/foovar-2.41.f21.src.rpm"

    BUILD_REMOTE_TARGET = "/tmp/copr-backend-test/foovar-2.41.f21.src.rpm"

    STDOUT = "stdout"
    STDERR = "stderr"

    RESULT_DIR = "/tmp"
    opts = Munch(
        ssh=Munch(transport="paramiko"),
        build_user=BUILDER_USER,
        timeout=BUILDER_TIMEOUT,
        remote_basedir=BUILDER_REMOTE_BASEDIR,
        remote_tempdir=BUILDER_REMOTE_TMPDIR,
        results_baseurl="http://example.com",
        redis_db=9,
        redis_port=7777,
    )

    GIT_HASH = "1234r"
    GIT_BRANCH = "f20"
    GIT_REPO = "foo/bar/xyz"

    def get_test_builder(self):
        self.job = BuildJob(
            {
                "project_owner": COPR_OWNER,
                "project_name": COPR_NAME,
                "pkgs": self.BUILDER_PKG,
                "repos": "",
                "build_id": 12345,
                "chroot": self.BUILDER_CHROOT,
                "buildroot_pkgs": self.BUILDER_BUILDROOT_PKGS,
                "git_repo": self.GIT_REPO,
                "git_hash": self.GIT_HASH,
                "git_branch": self.GIT_BRANCH,
                "package_name": self.BUILDER_PKG_NAME,
                "package_version": self.BUILDER_PKG_VERSION
            },
            Munch({
                "timeout": 1800,
                "destdir": self.test_root_path,
                "results_baseurl": "/tmp",
            }))

        self.mc_logger = MagicMock()
        builder = Builder(opts=self.opts,
                          hostname=self.BUILDER_HOSTNAME,
                          job=self.job,
                          logger=self.mc_logger)
        builder.checked = True

        builder.remote_pkg_name = self.BUILDER_PKG_BASE
        builder.remote_pkg_path = os.path.join(
            self.BUILDER_REMOTE_BASEDIR, self.BUILDER_PKG_BASE + ".src.rpm")

        return builder

    def setup_method(self, method):
        self.mc_ansible_runner_patcher = mock.patch(
            "backend.mockremote.builder.Runner")
        self.mc_ansible_runner = self.mc_ansible_runner_patcher.start()
        self.mc_ansible_runner.side_effect = lambda **kwargs: mock.MagicMock(
            **kwargs)

        self.test_root_path = tempfile.mkdtemp()

        self.stage = 0
        self.stage_ctx = defaultdict(dict)

    @property
    def buildcmd(self):
        return self.gen_mockchain_command(self.BUILDER_PKG)

    def teardown_method(self, method):
        self.mc_ansible_runner_patcher.stop()
        # remote tmp dir

        if os.path.exists(self.test_root_path):
            shutil.rmtree(self.test_root_path)

    def test_constructor(self):
        assert not self.mc_ansible_runner.called
        builder = self.get_test_builder()
        assert self.mc_ansible_runner.called

        assert builder.conn.remote_user == self.BUILDER_USER
        assert builder.root_conn.remote_user == "root"

    def test_get_remote_pkg_dir(self):
        builder = self.get_test_builder()
        expected = "/".join([
            self.BUILDER_REMOTE_TMPDIR, "build", "results",
            self.BUILDER_CHROOT, builder.remote_pkg_name
        ])
        assert builder._get_remote_results_dir() == expected

    def test_run_ansible(self):
        builder = self.get_test_builder()
        ans_cmd = "foo bar"

        for conn, as_root in [(builder.conn, False),
                              (builder.root_conn, True)]:
            for module_name in [None, "foo", "copy"]:
                run_count = conn.run.call_count
                builder._run_ansible(ans_cmd, as_root=as_root)
                assert conn.run.call_count == run_count + 1
                assert conn.module_args == ans_cmd
                assert conn.module_name == module_name or "shell"

    def test_check_for_ans_answer(self):
        """
            Silly test. Ansible api has almost no documentation,
            so we can only cover some return patterns :(

        """
        tested_func = builder_module.check_for_ans_error

        cases = [
            {
                "args": [
                    {
                        "dark": {},
                        "contacted": {}
                    }, self.BUILDER_HOSTNAME
                ],
                "kwargs": {},
                "expected_return": None,
                "expected_exception": VmError
            },
            {
                "args": [
                    {
                        "dark": {self.BUILDER_HOSTNAME: ""},
                        "contacted": {}
                    }, self.BUILDER_HOSTNAME
                ],
                "kwargs": {},
                "expected_return": None,
                "expected_exception": VmError
            },
            {
                "args": [
                    {
                        "dark": {},
                        "contacted": {self.BUILDER_HOSTNAME: {
                            "rc": 0,
                            "stdout": "stdout",
                            "stderr": "stderr",
                            "stdother": "stdother",
                        }}
                    }, self.BUILDER_HOSTNAME
                ],
                "kwargs": {},
                "expected_return": None,
                "expected_exception": None
            },
            {
                "args": [
                    {
                        "dark": {},
                        "contacted": {self.BUILDER_HOSTNAME: {
                            "rc": 1,
                            "stdout": "stdout",
                            "stderr": "stderr",
                            "stdother": "stdother",
                        }}
                    }, self.BUILDER_HOSTNAME
                ],
                "kwargs": {},
                "expected_return": None,
                "expected_exception": AnsibleResponseError
            },
            {  # 5
                "args": [
                    {
                        "dark": {},
                        "contacted": {self.BUILDER_HOSTNAME: {
                            "rc": 1,
                            "stdout": "stdout",
                            "stderr": "stderr",
                            "stdother": "stdother",

                        }}
                    }, self.BUILDER_HOSTNAME
                ],
                "kwargs": {"success_codes": [0, 1]},
                "expected_return": None,
                "expected_exception": None,
            },
            {
                "args": [
                    {
                        "dark": {},
                        "contacted": {self.BUILDER_HOSTNAME: {
                            "rc": 2,
                            "stdout": "stdout",
                            "stderr": "stderr",
                        }}
                    }, self.BUILDER_HOSTNAME
                ],
                "kwargs": {"err_codes": [2, 3]},
                "expected_return": None,
                "expected_exception": AnsibleResponseError
            },
            {
                "args": [
                    {
                        "dark": {},
                        "contacted": {self.BUILDER_HOSTNAME: {
                            "failed": True,
                            "stdout": "stdout",
                            "stderr": "stderr",
                            "stdother": "stdother",
                        }}
                    }, self.BUILDER_HOSTNAME
                ],
                "kwargs": {},
                "expected_return": None,
                "expected_exception": AnsibleResponseError
            }
        ]
        # counter = 0
        for case in cases:
            if case["expected_exception"]:
                with pytest.raises(case["expected_exception"]):
                    tested_func(*case["args"], **case["kwargs"])
            else:
                result = tested_func(*case["args"], **case["kwargs"])
                assert result == case["expected_return"]
Example #35
base_config = Munch(actions_added=False,
                    actorbatch=512,
                    add_rnd_steps_to_remain=0,
                    can_replace_proof=False,
                    curriculum_allowed=False,
                    curriculum_decay=0.8,
                    entcoeff=0.01,
                    evaldirs=["theorems/m2np/"],
                    evalprobcount=1,
                    evaltime=1,
                    evaltype="det",
                    failure_reward=0,
                    fast_features=True,
                    feature_file=None,
                    gamma=0.99,
                    graph_embedding=False,
                    graph_embedding_size=40,
                    graph_hidden_layers=[100, 100],
                    graph_node_count=40,
                    graph_update_iteration=3,
                    illegal_reward=0,
                    known_proof_max_exploration=0,
                    latent_dim=None,
                    lr_schedule="constant",
                    max_exploration=10,
                    model_type="ppo1",
                    n_action_slots=100,
                    n_dim=200,
                    network_layers=[512, 512, 512],
                    optim_batchsize=64,
                    optim_epochs=4,
                    optim_stepsize=0.00001,
                    outdir="results/noproof_m2np",
                    parallel_envs=30,
                    proof_dir="noproof",
                    quick_progress_percentage=0.9,
                    saved_model=None,
                    scheduler_starting_step=1,
                    scheduler_type="local",
                    steps_per_curriculum=2000,
                    supervised_reward=0,
                    terminate_on_illegal=False,
                    train_timesteps=[500000],
                    train_dirs=["theorems/m2np"],
                    use_previous_action=False,
                    value_gets_actions=False,
                    neptune=False,
                    tags=["m2np", "noproof"])
Example #36
def test_toJSON():
    b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
    assert json.dumps(b) == b.toJSON()
Example #37
def test_toDict():
    b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
    assert sorted(b.toDict().items()) == [('foo', {
        'lol': True
    }), ('hello', 42), ('ponies', 'are pretty!')]
Example #38
    def parameters(self):
        return Munch(hyper_n_channels=self.hyper_n_channels, ch_aln=self.ch_aln)
Example #39
class Solver(nn.Module):
    def __init__(self, args):

        super().__init__()
        self.args = args
        # self.device = porch.device('cuda' if porch.cuda.is_available() else 'cpu')
        print("Solver init....")
        self.nets, self.nets_ema = build_model(args)
        # below setattrs are to make networks be children of Solver, e.g., for self.to(self.device)
        for name, module in self.nets.items():
            utils.print_network(module, name)
            setattr(self, name, module)
        for name, module in self.nets_ema.items():
            setattr(self, name + '_ema', module)

        if args.mode == 'train':
            self.optims = Munch()
            for net in self.nets.keys():
                if net == 'fan':
                    continue
                self.optims[net] = porch.optim.Adam(
                    params=self.nets[net].parameters(),
                    lr=args.f_lr if net == 'mapping_network' else args.lr,
                    betas=[args.beta1, args.beta2],
                    weight_decay=args.weight_decay)

            self.ckptios = [
                CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'),
                             **self.nets),
                CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'),
                             **self.nets_ema),
                CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_optims.ckpt'),
                             **self.optims)
            ]
        else:
            self.ckptios = [
                CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'),
                             **self.nets_ema)
            ]

    def _save_checkpoint(self, step):
        for ckptio in self.ckptios:
            ckptio.save(step)

    def _load_checkpoint(self, step):
        for ckptio in self.ckptios:
            ckptio.load(step)

    def _reset_grad(self):
        for optim in self.optims.values():
            optim.clear_gradients()

    def train(self, loaders):
        args = self.args
        nets = self.nets
        nets_ema = self.nets_ema
        optims = self.optims
        writer = LogWriter(logdir=self.args.checkpoint_dir + "/log/")

        # fetch random validation images for debugging
        fetcher = InputFetcher(loaders.src, loaders.ref, args.latent_dim,
                               'train')
        fetcher_val = InputFetcher(loaders.val, None, args.latent_dim, 'val')
        inputs_val = next(fetcher_val)

        # resume training if necessary
        if args.resume_iter > 0:
            self._load_checkpoint(args.resume_iter)

        # remember the initial value of ds weight
        initial_lambda_ds = args.lambda_ds

        print('Start training...')
        import tqdm
        start_time = time.time()
        tqdm_descriptor = tqdm.trange(args.resume_iter, args.total_iters)
        for i in tqdm_descriptor:
            # fetch images and labels
            inputs = next(fetcher)
            x_real, y_org = inputs.x_src, inputs.y_src
            x_ref, x_ref2, y_trg = inputs.x_ref, inputs.x_ref2, inputs.y_ref
            z_trg, z_trg2 = inputs.z_trg, inputs.z_trg2

            masks = nets.fan.get_heatmap(x_real) if args.w_hpf > 0 else None

            # train the discriminator
            d_loss, d_losses_latent = compute_d_loss(nets,
                                                     args,
                                                     x_real,
                                                     y_org,
                                                     y_trg,
                                                     z_trg=z_trg,
                                                     masks=masks)
            self._reset_grad()
            d_loss.backward()
            optims.discriminator.minimize(d_loss)

            d_loss, d_losses_ref = compute_d_loss(nets,
                                                  args,
                                                  x_real,
                                                  y_org,
                                                  y_trg,
                                                  x_ref=x_ref,
                                                  masks=masks)
            self._reset_grad()
            d_loss.backward()
            optims.discriminator.minimize(d_loss)

            # train the generator
            if i - args.resume_iter > 1:  ## train discriminator first
                g_loss, g_losses_latent, sample_1 = compute_g_loss(
                    nets,
                    args,
                    x_real,
                    y_org,
                    y_trg,
                    z_trgs=[z_trg, z_trg2],
                    masks=masks)
                self._reset_grad()
                g_loss.backward()
                optims.generator.minimize(g_loss)
                optims.mapping_network.minimize(g_loss)
                optims.style_encoder.minimize(g_loss)

                g_loss, g_losses_ref, sample_2 = compute_g_loss(
                    nets,
                    args,
                    x_real,
                    y_org,
                    y_trg,
                    x_refs=[x_ref, x_ref2],
                    masks=masks)
                self._reset_grad()
                g_loss.backward()
                optims.generator.minimize(g_loss)

                # # compute moving average of network parameters
                # moving_average(nets.generator, nets_ema.generator, beta=0.999)
                # moving_average(nets.mapping_network, nets_ema.mapping_network, beta=0.999)
                # moving_average(nets.style_encoder, nets_ema.style_encoder, beta=0.999)

                # decay weight for diversity sensitive loss
                if args.lambda_ds > 0:
                    args.lambda_ds -= (initial_lambda_ds / args.ds_iter)

                # print out log info
                if (i + 1) % args.print_every == 0:
                    elapsed = time.time() - start_time
                    elapsed = str(datetime.timedelta(seconds=elapsed))[:-7]
                    log = "Elapsed time [%s], Iteration [%i/%i], " % (
                        elapsed, i + 1, args.total_iters)
                    all_losses = dict()
                    for loss, prefix in zip([
                            d_losses_latent, d_losses_ref, g_losses_latent,
                            g_losses_ref
                    ], ['D/latent_', 'D/ref_', 'G/latent_', 'G/ref_']):
                        for key, value in loss.items():
                            all_losses[prefix + key] = value
                            writer.add_scalar(tag=prefix + key,
                                              step=i + 1,
                                              value=value)
                    all_losses['G/lambda_ds'] = args.lambda_ds
                    log += ' '.join([
                        '%s: [%.4f]' % (key, value)
                        for key, value in all_losses.items()
                    ])
                    tqdm_descriptor.set_description(log)
                    writer.add_image("x_fake",
                                     (utils.denormalize(sample_1) *
                                      255).numpy().transpose([1, 2, 0]).astype(
                                          np.uint8), i + 1)

                # generate images for debugging
                if (i + 1) % args.sample_every == 0:
                    os.makedirs(args.sample_dir, exist_ok=True)
                    utils.debug_image(nets_ema,
                                      args,
                                      inputs=inputs_val,
                                      step=i + 1)

                # save model checkpoints
                if (i + 1) % args.save_every == 0:
                    self._save_checkpoint(step=i + 1)

                # compute FID and LPIPS if necessary
                if (i + 1) % args.eval_every == 0:
                    calculate_metrics(nets_ema, args, i + 1, mode='latent')
                    calculate_metrics(nets_ema, args, i + 1, mode='reference')
            else:
                if (i + 1) % args.print_every == 0:
                    elapsed = time.time() - start_time
                    elapsed = str(datetime.timedelta(seconds=elapsed))[:-7]
                    log = "Elapsed time [%s], Iteration [%i/%i], " % (
                        elapsed, i + 1, args.total_iters)
                    all_losses = dict()
                    for loss, prefix in zip([d_losses_latent, d_losses_ref],
                                            ['D/latent_', 'D/ref_']):
                        for key, value in loss.items():
                            all_losses[prefix + key] = value
                            writer.add_scalar(tag=prefix + key,
                                              step=i + 1,
                                              value=value)
                    log += ' '.join([
                        '%s: [%.4f]' % (key, value)
                        for key, value in all_losses.items()
                    ])
                    tqdm_descriptor.set_description(log)

        writer.close()

    def sample(self, loaders):
        args = self.args
        nets_ema = self.nets_ema
        for name in self.nets_ema:
            self.nets_ema[name].eval()
        os.makedirs(args.result_dir, exist_ok=True)
        self._load_checkpoint(args.resume_iter)

        src = next(InputFetcher(loaders.src, None, args.latent_dim, 'test'))
        ref = next(InputFetcher(loaders.ref, None, args.latent_dim, 'test'))

        fname = ospj(args.result_dir, 'reference.jpg')
        print('Working on {}...'.format(fname))
        utils.translate_using_reference(nets_ema, args, src.x, ref.x, ref.y,
                                        fname)

        fname = ospj(args.result_dir, 'video_ref.mp4')
        print('Working on {}...'.format(fname))
        utils.video_ref(nets_ema, args, src.x, ref.x, ref.y, fname)
        for name in self.nets_ema:
            self.nets_ema[name].train()

    def evaluate(self):
        args = self.args
        nets_ema = self.nets_ema
        for name in self.nets_ema:
            self.nets_ema[name].eval()
        resume_iter = args.resume_iter
        print("check point loading.......")
        self._load_checkpoint(args.resume_iter)
        print("check point loaded.......")
        return calculate_metrics(nets_ema,
                                 args,
                                 step=resume_iter,
                                 mode='latent')
        # calculate_metrics(nets_ema, args, step=resume_iter, mode='reference')
        for name in self.nets_ema:
            self.nets_ema[name].train()
Example #40
    def setup_method(self, method):
        self.vm_spawn_min_interval = 30

        self.opts = Munch(
            redis_host="127.0.0.1",
            redis_db=9,
            redis_port=7777,
            ssh=Munch(transport="ssh"),
            build_groups_count=2,
            build_groups={
                0: {
                    "name": "base",
                    "archs": ["i386", "x86_64"],
                    "max_vm_total": 5,
                    "max_spawn_processes": 3,
                    "vm_spawn_min_interval": self.vm_spawn_min_interval,
                    "vm_dirty_terminating_timeout": 120,
                    "vm_health_check_period": 10,
                    "vm_health_check_max_time": 60,
                    "vm_terminating_timeout": 300,
                },
                1: {
                    "name": "arm",
                    "archs": ["armV7"],
                    "vm_spawn_min_interval": self.vm_spawn_min_interval,
                    "vm_dirty_terminating_timeout": 120,
                    "vm_health_check_period": 10,
                    "vm_health_check_max_time": 60,
                    "vm_terminating_timeout": 300,
                }
            },
            fedmsg_enabled=False,
            sleeptime=0.1,
            vm_cycle_timeout=10,
        )

        self.queue = Queue()

        self.vm_ip = "127.0.0.1"
        self.vm_name = "localhost"
        self.group = 0
        self.username = "******"

        self.rc = get_redis_connection(self.opts)
        self.ps = None
        self.log_msg_list = []

        self.callback = TestCallback()
        # checker = HealthChecker(self.opts, self.callback)
        self.checker = MagicMock()
        self.spawner = MagicMock()
        self.terminator = MagicMock()

        self.mc_logger = MagicMock()
        self.vmm = VmManager(self.opts, logger=self.mc_logger)

        self.event_handler = MagicMock()
        self.vm_master = VmMaster(
            self.opts,
            self.vmm,
            self.spawner,
            self.checker,
        )
        self.vm_master.event_handler = MagicMock()
        self.pid = 12345

        self.vm_ip = "127.0.0.1"
        self.vm_name = "build 12345"
Example #41
class VFSV1Generator(BaseGenerator):
    """
    Train and test a classifier using a scheme and a POI set.

    This is typically run on more than one scheme, for example
    the top N hits from a survey.

    Assumptions:

    Generator-specific arguments:
    @--protein_of_interest="P10636-8"           # Only affects reporting downstream

    """

    schema = s(
        s.is_kws_r(
            **BaseGenerator.job_setup_schema.schema(),
            **BaseGenerator.protein_schema.schema(),
            **BaseGenerator.label_set_schema.schema(),
            **BaseGenerator.peptide_setup_schema.schema(),
            **BaseGenerator.sim_schema.schema(),
            **BaseGenerator.scheme_schema.schema(),
            **BaseGenerator.classifier_choice_schema.schema(),
            **BaseGenerator.scope_run_schema.schema(),
            # scoring_verbose=s.is_bool(
            #     help="Produce dyetrack-level debug info about nn_v2 scoring",
            #     noneable=True,
            # ),
            # scoring_verbose_cc=s.is_bool(
            #     help="Produce cycle/channel-level debug info about nn_v2 scoring",
            #     noneable=True,
            # ),
            # report_prec=s.is_list(
            #     elems=s.is_float(bounds=(0.001, 0.999)),
            #     help="The precision for classifier reporting",
            # ),
        )
    )

    defaults = Munch(
        n_edmans=10,
        n_pres=0,
        n_mocks=1,
        n_samples_train=5_000,
        n_samples_test=1_000,
        n_ptms_limit=0,
        allow_edman_cterm=False,
        decoys="none",
        random_seed=None,
        classifier="rf",
        protein_of_interest=None,
        use_lognormal_model=False,
        is_photobleaching_run=False,
        photobleaching_run_n_dye_count=None,
        # report_prec=[0.95, 0.9, 0.8],
        # scoring_verbose=False,
        # scoring_verbose_cc=False,
    )

    def apply_defaults(self):
        super().apply_defaults()

        # # Plumbum creates empty lists on list switches. This means
        # # that the apply defaults doesn't quite work right.
        # # TASK: Find a cleaner solution. For now hard-code
        # # if len(self.err_dye_beta) == 0:
        # #     self.err_dye_beta = self.defaults.dye_beta
        # # if len(self.dye_sigma) == 0:
        # #     self.dye_sigma = self.defaults.dye_sigma
        # if len(self.report_prec) == 0:
        #     self.report_prec = self.defaults.report_prec

    def run_parameter_permutator(self, use_lognormal_model=False):
        if self.is_photobleaching_run:
            # At some point there might be multi-channel bleaching
            # but I'd likely treat it as independent so for now setting to 1
            n_channels = 1
            err_set = self.photobleaching_err_set(n_channels, use_lognormal_model)
            yield None, None, err_set
        else:
            for protease, label_set, err_set in super().run_parameter_permutator(
                use_lognormal_model
            ):
                yield protease, label_set, err_set

    def generate(self):

        assert self.classifier in ("rf", "knn")

        run_descs = []

        if self.is_photobleaching_run:
            assert self.photobleaching_run_n_dye_count is not None

        for protease, aa_list, err_set in self.run_parameter_permutator(
            self.use_lognormal_model
        ):
            prep_task = {}
            if self.is_photobleaching_run:
                # Photobleaching is really a set of mock cycles
                # but due to idiosyncrasies of the simulator they
                # need to be made Edman cycles
                assert self.n_edmans == 0
                assert self.n_mocks > 0
                self.n_edmans = self.n_mocks
                self.n_mocks = 0

                assert self.photobleaching_run_n_dye_count > 0

                assert self.n_pres == 1
                prep_task = task_templates.prep(
                    None,
                    None,
                    None,
                    pois=None,
                    n_ptms_limit=None,
                    is_photobleaching_run=True,
                    photobleaching_run_n_dye_count=self.photobleaching_run_n_dye_count,
                    photobleaching_n_cycles=self.n_edmans
                    + 1,  # Really this is mocks, see above
                )
                aa_list = [PrepParams.PHOTOBLEACHING_PSEUDO_AA]

            else:
                prep_task = task_templates.prep(
                    self.protein,
                    protease,
                    self.decoys,
                    pois=self.protein_of_interest,
                    n_ptms_limit=self.n_ptms_limit,
                )

            sim_v2_task = task_templates.sim_v2(
                list(aa_list),
                err_set,
                n_pres=self.n_pres,
                n_mocks=self.n_mocks,
                n_edmans=self.n_edmans,
                n_samples_train=self.n_samples_train,
                n_samples_test=self.n_samples_test,
                allow_edman_cterm=self.allow_edman_cterm,
                use_lognormal_model=self.use_lognormal_model,
            )

            sim_v2_task.sim_v2.parameters.random_seed = self.random_seed

            rf_train_v2_task = {}
            rf_v2_task = {}
            knn_train_v1_task = {}
            knn_v1_task = {}
            eval_inputs = {}

            if self.classifier == "rf":
                sim_v2_task.sim_v2.parameters.train_includes_radmat = True
                rf_train_v2_task = task_templates.rf_train_v2()
                rf_v2_task = task_templates.rf_v2(extra_inputs=dict(sim_v2="../sim_v2"))
                eval_inputs["rf_v2"] = "../rf_v2"

            elif self.classifier == "knn":
                sim_v2_task.sim_v2.parameters.train_includes_radmat = True
                knn_train_v1_task = task_templates.knn_train_v1()
                knn_v1_task = task_templates.knn_v1(
                    extra_inputs=dict(sim_v2="../sim_v2")
                )
                eval_inputs["knn_v1"] = "../knn_v1"

            else:
                raise ValueError(f"Unknown classifier type {self.classifier}.")

            eval_v1_task = task_templates.eval_v1(inputs=eval_inputs)

            # if self.nn_v2:
            #     sigproc_relative_path = None
            #     rad_filter_relative_path = None
            #     if sigproc_tasks:
            #         sigproc_relative_path = f"../sigproc_v2"
            #         rad_filter_relative_path = f"../rad_filter"
            #
            #     nn_v2_task = task_templates.nn_v2(
            #         sigproc_folder=sigproc_relative_path,
            #         err_set=err_set,
            #         prep_folder="../prep",
            #         sim_v2_folder="../sim_v2",
            #         rad_filter_folder=rad_filter_relative_path,
            #         scoring_verbose=self.scoring_verbose,
            #         scoring_verbose_cc=self.scoring_verbose_cc,
            #         use_lognormal_model=self.use_lognormal_model,
            #     )

            e_block = self.erisyon_block(aa_list, protease, err_set)

            run_name = f"{e_block._erisyon.run_name}"
            if self.force_run_name is not None:
                run_name = self.force_run_name

            run_desc = Munch(
                run_name=run_name,
                **e_block,
                **prep_task,
                **sim_v2_task,
                **rf_train_v2_task,
                **rf_v2_task,
                **knn_train_v1_task,
                **knn_v1_task,
                **eval_v1_task,
            )
            run_descs += [run_desc]

        self.static_reports += ["vfs"]

        return run_descs
Example #42
    def generate(self):

        assert self.classifier in ("rf", "knn")

        run_descs = []

        if self.is_photobleaching_run:
            assert self.photobleaching_run_n_dye_count is not None

        for protease, aa_list, err_set in self.run_parameter_permutator(
            self.use_lognormal_model
        ):
            prep_task = {}
            if self.is_photobleaching_run:
                # Photobleaching is really a set of mock cycles,
                # but due to idiosyncrasies of the simulator they
                # need to be treated as Edman cycles.
                assert self.n_edmans == 0
                assert self.n_mocks > 0
                self.n_edmans = self.n_mocks
                self.n_mocks = 0

                assert self.photobleaching_run_n_dye_count > 0

                assert self.n_pres == 1
                prep_task = task_templates.prep(
                    None,
                    None,
                    None,
                    pois=None,
                    n_ptms_limit=None,
                    is_photobleaching_run=True,
                    photobleaching_run_n_dye_count=self.photobleaching_run_n_dye_count,
                    photobleaching_n_cycles=self.n_edmans
                    + 1,  # Really these are mocks; see above
                )
                aa_list = [PrepParams.PHOTOBLEACHING_PSEUDO_AA]

            else:
                prep_task = task_templates.prep(
                    self.protein,
                    protease,
                    self.decoys,
                    pois=self.protein_of_interest,
                    n_ptms_limit=self.n_ptms_limit,
                )

            sim_v2_task = task_templates.sim_v2(
                list(aa_list),
                err_set,
                n_pres=self.n_pres,
                n_mocks=self.n_mocks,
                n_edmans=self.n_edmans,
                n_samples_train=self.n_samples_train,
                n_samples_test=self.n_samples_test,
                allow_edman_cterm=self.allow_edman_cterm,
                use_lognormal_model=self.use_lognormal_model,
            )

            sim_v2_task.sim_v2.parameters.random_seed = self.random_seed

            rf_train_v2_task = {}
            rf_v2_task = {}
            knn_train_v1_task = {}
            knn_v1_task = {}
            eval_inputs = {}

            if self.classifier == "rf":
                sim_v2_task.sim_v2.parameters.train_includes_radmat = True
                rf_train_v2_task = task_templates.rf_train_v2()
                rf_v2_task = task_templates.rf_v2(extra_inputs=dict(sim_v2="../sim_v2"))
                eval_inputs["rf_v2"] = "../rf_v2"

            elif self.classifier == "knn":
                sim_v2_task.sim_v2.parameters.train_includes_radmat = True
                knn_train_v1_task = task_templates.knn_train_v1()
                knn_v1_task = task_templates.knn_v1(
                    extra_inputs=dict(sim_v2="../sim_v2")
                )
                eval_inputs["knn_v1"] = "../knn_v1"

            else:
                raise ValueError(f"Unknown classifier type {self.classifier}.")

            eval_v1_task = task_templates.eval_v1(inputs=eval_inputs)

            # if self.nn_v2:
            #     sigproc_relative_path = None
            #     rad_filter_relative_path = None
            #     if sigproc_tasks:
            #         sigproc_relative_path = f"../sigproc_v2"
            #         rad_filter_relative_path = f"../rad_filter"
            #
            #     nn_v2_task = task_templates.nn_v2(
            #         sigproc_folder=sigproc_relative_path,
            #         err_set=err_set,
            #         prep_folder="../prep",
            #         sim_v2_folder="../sim_v2",
            #         rad_filter_folder=rad_filter_relative_path,
            #         scoring_verbose=self.scoring_verbose,
            #         scoring_verbose_cc=self.scoring_verbose_cc,
            #         use_lognormal_model=self.use_lognormal_model,
            #     )

            e_block = self.erisyon_block(aa_list, protease, err_set)

            run_name = f"{e_block._erisyon.run_name}"
            if self.force_run_name is not None:
                run_name = self.force_run_name

            run_desc = Munch(
                run_name=run_name,
                **e_block,
                **prep_task,
                **sim_v2_task,
                **rf_train_v2_task,
                **rf_v2_task,
                **knn_train_v1_task,
                **knn_v1_task,
                **eval_v1_task,
            )
            run_descs += [run_desc]

        self.static_reports += ["vfs"]

        return run_descs
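
The run description above is assembled by `**`-unpacking several task-template Munches into a single mapping. A minimal sketch of that merge, using made-up task templates rather than the real `task_templates` output:

# Made-up task templates: each one is a Munch keyed by its task name,
# so unpacking them side by side yields one flat run description.
from munch import Munch

prep_task = Munch(prep=Munch(parameters=Munch(n_ptms_limit=None)))
sim_v2_task = Munch(sim_v2=Munch(parameters=Munch(random_seed=123)))

run_desc = Munch(run_name="demo_run", **prep_task, **sim_v2_task)
assert run_desc.sim_v2.parameters.random_seed == 123
assert list(run_desc.keys()) == ["run_name", "prep", "sim_v2"]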
Example #43
import json, jsonpatch, jsonpath_ng
import mako
import networkx as nx
import os
import re
import sys
import tarfile
import tempfile
import urllib.request
import zipfile

from munch import Munch
from urllib.request import urlopen

root = os.path.join(os.path.dirname(__file__), '..')

print('parsing Zotero/Juris-M schemas')
SCHEMA = Munch(root=os.path.join(root, 'schema'))
ITEMS = os.path.join(root, 'gen/items')
TYPINGS = os.path.join(root, 'gen/typings')

os.makedirs(SCHEMA.root, exist_ok=True)
os.makedirs(ITEMS, exist_ok=True)
os.makedirs(TYPINGS, exist_ok=True)


class fetch(object):
    def __init__(self, client):
        if client == 'zotero':
            releases = urlopen(
                "https://www.zotero.org/download/client/manifests/release/updates-linux-x86_64.json"
            ).read().decode("utf-8")
            releases = json.loads(releases)
Example #44
    def parameters(self):
        return Munch(mu=self.mu, sigma=self.sigma)
Example #45
def test_check_copr_build(clean_before_and_after, packit_build_752):
    flexmock(Client).should_receive("create_from_config_file").and_return(
        Client(None))
    flexmock(CoprBuildEvent).should_receive("get_package_config").and_return(
        PackageConfig(jobs=[
            JobConfig(
                type=JobType.copr_build,
                trigger=JobConfigTriggerType.pull_request,
                metadata=JobMetadataConfig(targets=[
                    "fedora-30-x86_64",
                    "fedora-rawhide-x86_64",
                    "fedora-31-x86_64",
                    "fedora-32-x86_64",
                ]),
            )
        ]))
    coprs_response = Munch({
        "chroots": [
            "fedora-30-x86_64",
            "fedora-rawhide-x86_64",
            "fedora-31-x86_64",
            "fedora-32-x86_64",
        ],
        "ended_on": 1583916564,
        "id": 1300329,
        "ownername": "packit",
        "project_dirname": "packit-service-packit-752",
        "projectname": "packit-service-packit-752",
        "repo_url": ("https://download.copr.fedorainfracloud.org/"
                     "results/packit/packit-service-packit-752"),
        "source_package": {
            "name": "packit",
            "url": ("https://download.copr.fedorainfracloud.org/"
                    "results/packit/packit-service-packit-752/"
                    "srpm-builds/01300329/packit-0.8.2.dev122g64ebb47-1.fc31.src.rpm"),
            "version": "0.8.2.dev122+g64ebb47-1.fc31",
        },
        "started_on": 1583916315,
        "state": "succeeded",
        "submitted_on": 1583916261,
        "submitter": "packit",
    })
    flexmock(BuildProxy).should_receive("get").and_return(coprs_response)

    chroot_response = Munch({
        "ended_on": 1583916564,
        "name": "fedora-rawhide-x86_64",
        "result_url": ("https://download.copr.fedorainfracloud.org/"
                       "results/packit/packit-service-packit-752/fedora-rawhide-x86_64/"
                       "01300329-packit/"),
        "started_on": 1583916315,
        "state": "succeeded",
    })
    flexmock(BuildChrootProxy).should_receive("get").with_args(
        BUILD_ID, "fedora-rawhide-x86_64").and_return(chroot_response)

    # Reporting
    flexmock(GithubProject).should_receive("get_pr").and_return(flexmock())
    flexmock(GithubProject).should_receive("get_pr_comments").and_return([])
    flexmock(GithubProject).should_receive("pr_comment").and_return()
    flexmock(GithubProject).should_receive(
        "set_commit_status").and_return().once()

    check_copr_build(BUILD_ID)
    assert packit_build_752.status == PG_COPR_BUILD_STATUS_SUCCESS
Example #46
base_config = Munch(
    actions_added=False,
    actorbatch=512,
    add_repeating_pretraining=False,
    add_rnd_steps_to_remain=0,
    can_replace_proof=True,
    curriculum_allowed=True,
    curriculum_decay=0.8,
    det_steps=True,
    entcoeff=0.01,
    episodes_per_problem=5,
    evaldirs=["theorems/robinson/robinson_1p1__2"],
    evalcount=100,
    evaltime=1,
    evaltype="mcts",
    failure_reward=0,
    fast_features=True,
    feature_file=None,
    gamma=0.99,
    graph_embedding=False,
    graph_embedding_size=40,
    graph_hidden_layers=[100, 100],
    graph_node_count=40,
    graph_update_iteration=3,
    illegal_reward=0,
    known_proof_max_exploration=0,
    latent_dim=None,
    lr_schedule="constant",
    max_exploration=5,
    model_type="ppo1",
    n_action_slots=22,
    n_dim=100,
    network_layers=[512, 512, 512],
    optim_batchsize=64,
    optim_epochs=4,
    optim_stepsize=0.0001,
    outdir="results/experiment2",
    parallel_envs=1,
    proof_dir="noproof",
    quick_progress_percentage=0.9,
    saved_model=None,
    scheduler_starting_step=1,
    scheduler_type="local",
    steps_per_curriculum=100000,
    supervised_reward=0,
    terminate_on_illegal=False,
    train_timesteps=[10000],
    train_dirs=["theorems/robinson/robinson_1p1__2"],
    # train_dirs=["theorems/robinson/robinson_noproof/noproof1"],
    # train_dirs=["theorems/pelletier21.p"],
    use_previous_state=True,
    use_previous_action=True,
    use_action_shuffle=False,
    use_mcts=False,
    use_remove=False,
    use_replay=True,
    use_shortest_proof=True,
    value_gets_actions=False,
    neptune=False,
    tags=["experiment2"])
Example #47
def test_base():
    b = Munch()
    b.hello = 'world'
    assert b.hello == 'world'
    b['hello'] += "!"
    assert b.hello == 'world!'
    b.foo = Munch(lol=True)
    assert b.foo.lol is True
    assert b.foo is b['foo']

    assert sorted(b.keys()) == ['foo', 'hello']

    b.update({'ponies': 'are pretty!'}, hello=42)
    assert b == Munch({
        'ponies': 'are pretty!',
        'foo': Munch({'lol': True}),
        'hello': 42
    })

    assert sorted([(k, b[k]) for k in b]) == [('foo', Munch({'lol': True})),
                                              ('hello', 42),
                                              ('ponies', 'are pretty!')]

    assert "The {knights} who say {ni}!".format(**Munch(
        knights='lolcats', ni='can haz')) == 'The lolcats who say can haz!'
Example #48
def test_dir():
    m = Munch(a=1, b=2)
    assert dir(m) == ['a', 'b']
Example #49
def test_copy():
    m = Munch(urmom=Munch(sez=Munch(what='what')))
    c = m.copy()
    assert c.urmom.sez.what == 'what'
Example #50
    def _read_unsafe(self):
        cp = ConfigParser.ConfigParser()
        cp.read(self.config_file)

        opts = Munch()

        opts.results_baseurl = _get_conf(cp, "backend", "results_baseurl",
                                         "http://copr-be")

        opts.frontend_base_url = _get_conf(cp, "backend", "frontend_base_url",
                                           "http://copr-fe")

        opts.dist_git_url = _get_conf(cp, "backend", "dist_git_url",
                                      "http://dist-git")

        opts.frontend_auth = _get_conf(cp, "backend", "frontend_auth",
                                       "PASSWORDHERE")

        opts.redis_host = _get_conf(cp, "backend", "redis_host", "127.0.0.1")

        opts.redis_port = _get_conf(cp, "backend", "redis_port", "6379")

        opts.redis_db = _get_conf(cp, "backend", "redis_db", "0")

        opts.do_sign = _get_conf(cp, "backend", "do_sign", False, mode="bool")

        opts.keygen_host = _get_conf(cp, "backend", "keygen_host",
                                     "copr-keygen.cloud.fedoraproject.org")

        opts.build_user = _get_conf(cp, "backend", "build_user",
                                    DEF_BUILD_USER)

        opts.build_groups_count = _get_conf(cp,
                                            "backend",
                                            "build_groups",
                                            1,
                                            mode="int")

        opts.build_groups = []
        for group_id in range(opts.build_groups_count):
            archs = _get_conf(cp,
                              "backend",
                              "group{0}_archs".format(group_id),
                              default="i386,x86_64").split(",")
            group = {
                "id": int(group_id),
                "name": _get_conf(cp, "backend",
                                  "group{0}_name".format(group_id), "PC"),
                "archs": archs,
                "spawn_playbook": _get_conf(
                    cp, "backend", "group{0}_spawn_playbook".format(group_id),
                    default="/srv/copr-work/provision/builderpb-PC.yml"),
                "terminate_playbook": _get_conf(
                    cp, "backend",
                    "group{0}_terminate_playbook".format(group_id),
                    default="/srv/copr-work/provision/terminatepb-PC.yml"),
                "max_workers": _get_conf(
                    cp, "backend", "group{0}_max_workers".format(group_id),
                    default=32, mode="int"),
                "max_vm_total": _get_conf(
                    cp, "backend", "group{}_max_vm_total".format(group_id),
                    # default=16, mode="int"),
                    default=8, mode="int"),
                "max_vm_per_user": _get_conf(
                    cp, "backend", "group{}_max_vm_per_user".format(group_id),
                    default=4, mode="int"),
                "max_builds_per_vm": _get_conf(
                    cp, "backend", "group{}_max_builds_per_vm".format(group_id),
                    default=10, mode="int"),
                "max_spawn_processes": _get_conf(
                    cp, "backend",
                    "group{}_max_spawn_processes".format(group_id),
                    default=2, mode="int"),
                "vm_spawn_min_interval": _get_conf(
                    cp, "backend",
                    "group{}_vm_spawn_min_interval".format(group_id),
                    default=30, mode="int"),
                "vm_dirty_terminating_timeout": _get_conf(
                    cp, "backend",
                    "group{}_vm_dirty_terminating_timeout".format(group_id),
                    default=120, mode="int"),
                "vm_health_check_period": _get_conf(
                    cp, "backend",
                    "group{}_vm_health_check_period".format(group_id),
                    default=120, mode="int"),
                "vm_health_check_max_time": _get_conf(
                    cp, "backend",
                    "group{}_vm_health_check_max_time".format(group_id),
                    default=300, mode="int"),
                "vm_max_check_fails": _get_conf(
                    cp, "backend",
                    "group{}_vm_max_check_fails".format(group_id),
                    default=2, mode="int"),
                "vm_terminating_timeout": _get_conf(
                    cp, "backend",
                    "group{}_vm_terminating_timeout".format(group_id),
                    default=600, mode="int"),
            }
            opts.build_groups.append(group)

        opts.vm_cycle_timeout = _get_conf(cp,
                                          "backend",
                                          "vm_cycle_timeout",
                                          default=10,
                                          mode="int")
        opts.vm_ssh_check_timeout = _get_conf(cp,
                                              "backend",
                                              "vm_ssh_check_timeout",
                                              default=5,
                                              mode="int")

        opts.destdir = _get_conf(cp, "backend", "destdir", None, mode="path")

        opts.exit_on_worker = _get_conf(cp,
                                        "backend",
                                        "exit_on_worker",
                                        False,
                                        mode="bool")
        opts.fedmsg_enabled = _get_conf(cp,
                                        "backend",
                                        "fedmsg_enabled",
                                        False,
                                        mode="bool")
        opts.sleeptime = _get_conf(cp, "backend", "sleeptime", 10, mode="int")
        opts.timeout = _get_conf(cp,
                                 "builder",
                                 "timeout",
                                 DEF_BUILD_TIMEOUT,
                                 mode="int")
        opts.consecutive_failure_threshold = _get_conf(
            cp,
            "builder",
            "consecutive_failure_threshold",
            DEF_CONSECUTIVE_FAILURE_THRESHOLD,
            mode="int")
        opts.log_dir = _get_conf(cp, "backend", "log_dir", "/var/log/copr/")
        opts.log_level = _get_conf(cp, "backend", "log_level", "info")
        opts.verbose = _get_conf(cp, "backend", "verbose", False, mode="bool")

        opts.prune_days = _get_conf(cp,
                                    "backend",
                                    "prune_days",
                                    None,
                                    mode="int")

        # ssh options
        opts.ssh = Munch()
        # TODO: ansible Runner shows some magic bugs with transport "ssh"; using paramiko instead
        opts.ssh.transport = _get_conf(cp, "ssh", "transport", "paramiko")

        # thoughts for later
        # ssh key for connecting to builders?
        # cloud key stuff?
        #
        return opts
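
The reader above leans on a `_get_conf` helper that is not shown in this excerpt. Below is a minimal sketch of what such a helper might look like, assuming only the `(cp, section, option, default, mode)` call pattern used above; this is not the actual copr implementation.

import os

def _get_conf(cp, section, option, default, mode=None):
    # Fall back to the default when the section or option is absent.
    if not cp.has_section(section) or not cp.has_option(section, option):
        return default
    if mode == "bool":
        return cp.getboolean(section, option)
    if mode == "int":
        return cp.getint(section, option)
    value = cp.get(section, option)
    if mode == "path":
        # Assumed behaviour: normalize filesystem paths.
        value = os.path.normpath(os.path.expanduser(value))
    return value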
Example #51
def test_fromDict():
    b = Munch.fromDict({'urmom': {'sez': {'what': 'what'}}})
    assert b.urmom.sez.what == 'what'
Example #52
def shift(x, N):  # fragment: the signature and the `if` branch are reconstructed (assumed)
    up = N >= 0
    N = abs(N)
    _, _, H, W = x.size()
    if up:
        head = torch.arange(H - N) + N
        tail = torch.arange(N)
    else:
        head = torch.arange(N) + (H-N)
        tail = torch.arange(H-N)

    # permutation indices
    perm = torch.cat([head, tail]).to(x.device)
    out = x[:, :, perm, :]
    return out


IDXPAIR = namedtuple('IDXPAIR', 'start end')
index_map = Munch(chin=IDXPAIR(0 + 8, 33 - 8),
                  eyebrows=IDXPAIR(33, 51),
                  eyebrowsedges=IDXPAIR(33, 46),
                  nose=IDXPAIR(51, 55),
                  nostrils=IDXPAIR(55, 60),
                  eyes=IDXPAIR(60, 76),
                  lipedges=IDXPAIR(76, 82),
                  lipupper=IDXPAIR(77, 82),
                  liplower=IDXPAIR(83, 88),
                  lipinner=IDXPAIR(88, 96))
OPPAIR = namedtuple('OPPAIR', 'shift resize')


def preprocess(x):
    """Preprocess 98-dimensional heatmaps."""
    N, C, H, W = x.size()
    x = truncate(x)
    x = normalize(x)

    sw = H // 256
    operations = Munch(chin=OPPAIR(0, 3),
Example #53
    def __init__(self, env, agent_config: Munch):
        """
        Initialize everything you need here.
        For example:
            parameters for the neural network
            initialize Q net and target Q net
            parameters for the replay buffer
            parameters for q-learning; decaying epsilon-greedy
            ...
        """

        super(DQNAgent, self).__init__(env=env, agent_config=agent_config)
        # make sure that the environment is an Atari/OpenAI one!
        assert isinstance(env, AtariEnvironment)
        # Declare primitive variables

        self.state = Munch({**self.state,
                            "num_actions": env.action_space.n,
                            "cur_eps": None,
                            "t": 0,
                            "ep_len": 0,
                            "mode": None,
                            "position": 0,
                            })
        self.reward_list = deque(maxlen=agent_config.window)
        self.max_q_list = deque(maxlen=agent_config.window)
        self.loss_list = deque(maxlen=agent_config.window)
        self.probability_list = np.zeros(env.action_space.n, np.float32)
        self.action_list = np.arange(env.action_space.n)
        
        self.state.eps_delta = (self.state.config.eps - self.state.config.eps_min) / self.state.config.eps_decay_window

        if self.state.config.use_pri_buffer:
            self.replay_buffer = PrioritizedBuffer(capacity=self.state.config.capacity, args=self.state.config)
        else:
            self.replay_buffer = ReplayBuffer(capacity=self.state.config.capacity, args=self.state.config)

        self.env = env
        self.meta = None
        # Create Policy and Target Networks
        self.policy_net = CNNModel(env, self.state.config).to(self.state.config.device)
        self.target_net = CNNModel(env, self.state.config).to(self.state.config.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=1.5e-4, eps=0.001)
        # Compute Huber loss
        self.loss = F.smooth_l1_loss

        # todo: Support for Multiprocessing. Bug in pytorch - https://github.com/pytorch/examples/issues/370
        # self.policy_net.share_memory()
        # self.target_net.share_memory()

        # Set defaults for networks
        self.policy_net.train()
        self.target_net.eval()
        self.target_net.load_state_dict(self.policy_net.state_dict())

        # if args.test_dqn:
        #     # you can load your model here
        #     ###########################
        #     # YOUR IMPLEMENTATION HERE #
        #     print('loading trained model')
        #     self.load_model()

        if agent_config.use_pri_buffer:
            logger.info('Using priority buffer . . .')
        if agent_config.use_double_dqn:
            logger.info('Using double dqn . . .')
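
The agent state above is built by spreading the parent's `self.state` Munch into a new one and adding episode-level fields. A small, self-contained illustration of that pattern with toy values:

from munch import Munch

# Toy values only: spreading the old state keeps its keys, while the
# explicit entries add or override fields in the new Munch.
old_state = Munch(config=Munch(eps=1.0, eps_min=0.02), t=41)
new_state = Munch({**old_state, "t": 0, "mode": "train", "position": 0})
assert new_state.config.eps == 1.0
assert new_state.t == 0 and new_state.mode == "train"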
Example #54
class SurveyV2Params(Params):
    # Maybe I'll inherit params from TestNN, but let's see what we want first...

    defaults = Munch()
    schema = s(s.is_kws_r())
Example #55
    parser.add_argument(
        '--data.name',
        type=str,
        metavar='MODELPATH',
        help="location of pretrained model weights to evaluate")
    parser.add_argument(
        '--data.root',
        type=str,
        help='output directory to which results should be saved')
    parser.add_argument('--data.class_count',
                        type=int,
                        help='total number of classes in dataset')
    parser.add_argument('--per_class_count',
                        type=int,
                        help="How many training data points per class")
    parser.add_argument('--val_size',
                        type=int,
                        help="Total number of validation points")
    args = vars(parser.parse_args())

    opt = {}
    for k, v in args.items():
        cur = opt
        tokens = k.split('.')
        for token in tokens[:-1]:
            if token not in cur:
                cur[token] = {}
            cur = cur[token]
        cur[tokens[-1]] = v
    main(Munch.fromDict(opt))
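
For illustration, here is how the loop above unflattens dotted argparse names into a nested mapping before `Munch.fromDict` turns it into attribute-style access (the argument values are hypothetical):

from munch import Munch

args = {"data.name": "model.pt", "data.class_count": 10, "val_size": 100}
opt = {}
for k, v in args.items():
    cur = opt
    tokens = k.split(".")
    for token in tokens[:-1]:
        if token not in cur:
            cur[token] = {}
        cur = cur[token]
    cur[tokens[-1]] = v

opt = Munch.fromDict(opt)
assert opt.data.class_count == 10 and opt.val_size == 100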
Example #56
    def save(self):
        stringizer = lambda x: self.dg.nodes[x]['name'] if x in self.dg.nodes else x

        # remove multi-line text fields
        for node, data in list(self.dg.nodes(data=True)):
            if data['domain'] + '.' + data['name'] in [
                    'zotero.abstractNote', 'zotero.extra', 'csl.abstract',
                    'csl.note'
            ]:
                self.dg.remove_node(node)

        # remove two or more incoming var edges, as that would incur overwrites (= data loss)
        removed = set()
        for node, data in self.dg.nodes(data=True):
            incoming = reduce(
                lambda acc, edge: acc[self.dg.nodes[edge[0]]['domain']].append(
                    edge) or acc, self.dg.in_edges(node),
                Munch(zotero=[], csl=[], label=[]))
            for domain, edges in incoming.items():
                if domain == 'label' or len(edges) < 2: continue

                self.changeid += 1
                for edge in edges:
                    removed.add(edge)
                    self.dg.edges[edge].update({
                        'removed': True,
                        'label': self.add_change(
                            self.dg.edges[edge].get('label'), self.changeid),
                        'graphics': {
                            'style': 'dashed',
                            'fill': self.color.removed,
                            'targetArrow': 'standard'
                        },
                        'LabelGraphics': {'color': self.color.label},
                    })

        # hop-through labels. Memorize here which labels had a direct connection *before any expansion*
        labels = {
            label: set([
                self.dg.nodes[edge[1]]['domain']
                for edge in self.dg.out_edges(label)
            ])
            for label, data in self.dg.nodes(data=True)
            if data['domain'] == 'label' and not re.search(
                r'[-_A-Z]', data['name'])  # a label but not a shadow label
        }
        for u, vs in dict(
                nx.all_pairs_dijkstra_path(
                    self.dg,
                    weight=lambda u, v, d: None
                    if d.get('removed', False) else 1)).items():
            # only interested in shortest paths that originate in a label
            if not u in labels: continue

            for v, path in vs.items():
                if u == v: continue  # no loops obviously
                if self.dg.has_edge(u, v): continue  # already in place
                if len(path) != 3:
                    continue  # only consider one-step hop-through

                # TODO: label already has a direct edge to the hop-through domain -- this entails fanning out the data unnecessarily
                if self.dg.nodes[v]['domain'] in labels[u]: continue

                self.changeid += 1
                for edge in zip(path, path[1:]):
                    self.dg.edges[edge].update({
                        'label': self.add_change(
                            self.dg.edges[edge].get('label'), self.changeid),
                    })
                self.dg.add_edge(u,
                                 v,
                                 label=str(self.changeid),
                                 added=True,
                                 graphics={
                                     'style': 'dashed',
                                     'fill': self.color.added,
                                     'targetArrow': 'standard'
                                 })

        for u, vs in dict(nx.all_pairs_shortest_path(self.dg)).items():
            if self.dg.nodes[u]['domain'] != 'label': continue
            for v, path in vs.items():
                # length of 3 means potential hop-through node
                if u != v and len(path) == 3 and len(
                        set(zip(path, path[1:])).intersection(removed)) > 0:
                    #print('removed', path)
                    pass

        #for i, sg in enumerate(nx.weakly_connected_components(self.dg)):
        #  nx.draw(self.dg.subgraph(sg), with_labels=True)
        #  plt.savefig(f'{i}.png')

        mapping = {}
        for label, data in list(self.dg.nodes(data=True)):
            if data['domain'] != 'label': continue
            name = data['name']

            var_nodes = [var for _, var in self.dg.out_edges(label)]
            if len(var_nodes) == 0:
                self.dg.remove_node(label)
            else:
                for var in var_nodes:
                    var = self.dg.nodes[var]
                    if not name in mapping: mapping[name] = {}
                    assert 'type' not in mapping[name] or mapping[name][
                        'type'] == var['type']
                    mapping[name]['type'] = var['type']

                    domain = var['domain']
                    if not domain in mapping[name]: mapping[name][domain] = []
                    mapping[name][domain].append(var['name'])

        # ensure names don't get mapped to multiple fields
        for var, mapped in mapping.items():
            if mapped['type'] != 'name': continue
            assert len(mapped.get('zotero', [])) <= 1, (var, mapped)
            assert len(mapped.get('csl', [])) <= 1, (var, mapped)

        # docs
        with open(
                os.path.join(root, 'site/layouts/shortcodes/extra-fields.md'),
                'w') as f:
            writer = MarkdownTableWriter()
            writer.headers = ['label', 'type', 'zotero/jurism', 'csl']
            writer.value_matrix = []
            doc = {}
            for label, data in self.dg.nodes(data=True):
                if not ' ' in label or data['domain'] != 'label': continue
                name = data['name']
                doc[name] = {'zotero': [], 'csl': []}
                for _, to in self.dg.out_edges(label):
                    data = self.dg.nodes[to]

                    if not 'type' in doc[name]:
                        doc[name]['type'] = data['type']
                    else:
                        assert doc[name]['type'] == data['type']

                    if data.get('zotero', False) == data.get('jurism', False):
                        postfix = ''
                    elif data.get('zotero'):
                        postfix = '\u00B2'
                    else:
                        postfix = '\u00B9'
                    doc[name][data['domain']].append(
                        data['name'].replace('_', '\\_') + postfix)
            for label, data in sorted(doc.items(), key=lambda x: x[0]):
                writer.value_matrix.append((f'**{label}**', data['type'],
                                            ' / '.join(sorted(data['zotero'])),
                                            ' / '.join(sorted(data['csl']))))
            writer.stream = f
            writer.write_table()

        with open(os.path.join(ITEMS, 'extra-fields.json'), 'w') as f:
            json.dump(mapping, f, sort_keys=True, indent='  ')

        # remove phantom labels for clarity
        for label in [
                node for node, data in self.dg.nodes(data=True)
                if data['domain'] == 'label' and 'LabelGraphics' in data
        ]:
            self.dg.remove_node(label)
        nx.write_gml(self.dg, 'mapping.gml', stringizer)
Example #57
# Fragment: the first two lines of this example are reconstructed (assumed),
# mirroring the optimizer construction below.
ModelClass = load_class(config.model._class_)
net: nn.Module = ModelClass(
    **{k: v
       for k, v in config.model.items() if k != '_class_'})
net.to(config.opts.device)

OptimizerClass = load_class(config.optimizer._class_)
optimizer: optim.Optimizer = OptimizerClass(
    params=net.parameters(),
    **{k: v
       for k, v in config.optimizer.items() if k != '_class_'})

if config.training.restore:
    train_state = saver.load(model=net,
                             optimizer=optimizer,
                             device=config.training.device)
else:
    train_state = Munch(epochs=0, samples=0)

if config.opts.log:
    with open(folder_run / 'config.yml', mode='w') as f:
        f.write(config.toYAML())
    logger.add_text('Config',
                    textwrap.indent(config.toYAML(), '    '),
                    global_step=train_state.samples)


def make_dataloader(dataset, shuffle) -> data.DataLoader:
    return data.DataLoader(dataset,
                           batch_size=config.training.batch_size,
                           collate_fn=tg.GraphBatch.collate,
                           num_workers=config.opts.cpus,
                           shuffle=shuffle,
Example #58
def test_verify_password_is_false_when_user_pw_is_none():
    login = Login()
    login.user = Munch(password=None)
    assert login.verify_password(False) is False
Example #59
def read(file):
    file = str(file)
    ext = file.rsplit(".")[-1].lower()
    if ext in ("yml", "yaml"):
        with open(file, "r") as f:
            yml = yaml.load(f, Loader=yaml.FullLoader)
            return Munch.fromDict(yml)
    if ext in ("md", ):
        md = Munch(meta=None, md=None, html=None)
        with open(file, "r") as f:
            for l in f.readlines():
                sl = unidecode.unidecode(l.strip())
                if md.md is not None:
                    md.md.append(l)
                    continue
                if sl == '---':
                    if md.meta is None:
                        md.meta = []
                    else:
                        md.md = []
                    continue
                md.meta.append(l)
        md.meta = "".join(md.meta)
        md.md = "".join(md.md)
        md.html = markdown(md.md, extensions=['extra'])
        md.meta = yaml.safe_load(md.meta)
        md.meta = Munch.fromDict(md.meta)
        if file.endswith("/metrica_v3.md"):
            file = file.replace("/content/posts/", "/output/")
            file = file.rsplit(".", 1)[0] + ".html"
            if isfile(file):
                with open(file, "r") as f:
                    html = f.read()
                soup = bs4.BeautifulSoup(html, "lxml")
                a = soup.find("article")
                a.name = "div"
                md.html = str(a)
        return md

    with open(file, "r") as f:
        return f.read()
def munch_from_object(data, format="yaml"):
    if format.lower() == 'json':
        return Munch.fromJSON(data)
    else:
        return Munch.fromYAML(data)
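
A short usage sketch for `munch_from_object` with made-up payloads; both branches return a Munch, so attribute access works regardless of the input format (the YAML branch assumes PyYAML is installed, which `Munch.fromYAML` requires):

yaml_cfg = munch_from_object("host: localhost\nport: 6379\n")
json_cfg = munch_from_object('{"host": "localhost", "port": 6379}', format="json")
assert yaml_cfg.port == json_cfg.port == 6379
assert yaml_cfg.host == "localhost"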