Example #1
def test_load_from_ext_id_raises(self):
    m_session = MagicMock()
    ext_id = "whatever"
    m_session.query.side_effect = NoResultFound()
    with self.assertRaises(IrmaDatabaseResultNotFound):
        FileExt.load_from_ext_id(ext_id, m_session)
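The test above asserts that a SQLAlchemy NoResultFound coming out of the session is translated into the application's own IrmaDatabaseResultNotFound. A minimal sketch of the pattern being tested, assuming hypothetical FileExt fields and exception definition (only the test itself is from the source):

from sqlalchemy.orm.exc import NoResultFound


class IrmaDatabaseResultNotFound(Exception):
    """Domain-level 'not found' error (assumed definition)."""


class FileExt:
    @classmethod
    def load_from_ext_id(cls, ext_id, session):
        try:
            # the query chain is an assumption; any NoResultFound raised
            # while querying is mapped to the domain exception
            return session.query(cls).filter_by(external_id=ext_id).one()
        except NoResultFound as e:
            raise IrmaDatabaseResultNotFound(ext_id) from e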
Example #2
def query(query_json):
    file_format = 'csv' if 'csv' in request.headers.get(
        'accept', 'json').lower() else 'json'
    try:
        created = None
        model_id = get_model_id(query_json)
        model = PathogenModel.query.filter(
            PathogenModel.id == model_id).order_by(
                PathogenModel.created.desc()).first()
        if model is None:
            raise NoResultFound(
                f"Could not find the model with the id {model_id} from query string: {json.dumps(query_json)}"
            )

        # We have our model; let's check whether we already have a worker container
        s = None
        try:
            container = client.containers.get(f'sfim-{model_id}')
        except docker.errors.NotFound:
            container = None

        # start container if it is not running
        if container is None:
            image = current_app.config['WORKER_IMAGE']
            container_volumes = {
                current_app.config['MODEL_HOST_PATH']: {
                    'bind': '/worker_model_store',
                    'mode': 'ro'
                }
            }
            container_env = dict(MODEL_STORE="/worker_model_store")
            container = client.containers.run(image,
                                              name=f"sfim-{model_id}",
                                              tty=True,
                                              detach=True,
                                              environment=container_env,
                                              volumes=container_volumes,
                                              stdin_open=True,
                                              auto_remove=True)
            # flag that we created the container so error cleanup can stop it
            created = True
            s = container.attach_socket(params={'stdin': 1, 'stream': 1})
            # initialize our model by loading
            s._sock.send(
                f'library(modelServR)\nmodel <- loadModelFileById("{model_id}")\n'
                .encode('utf-8'))

        # if we need to attach to an existing container, do so now
        if s is None:
            s = container.attach_socket(params={'stdin': 1, 'stream': 1})

        # define where we want our output written to
        outfile = str(uuid.uuid4())

        # Run our query against the model (it should already be loaded)
        command = f'queryLoadedModel(model, "{outfile}", format="{file_format}")\n'
        s._sock.send(command.encode('utf-8'))
        s.close()

        # Fetch our result

        x = 0
        file_json = None
        while x < 3 and file_json is None:
            try:
                file_json = container.get_archive(f'/tmp/{outfile}')
            except docker.errors.NotFound:
                file_json = None
            x += 1
            time.sleep(0.05)

        if file_json is None:
            raise ModelExecutionException(
                f"Problem executing the model {model_id}. "
                f"Could not get the model response from {outfile}")

        # Fetch data from the stream
        # TODO: in prod, maybe stream this back to the user?
        stream, stat = file_json
        file_obj = BytesIO()
        for i in stream:
            file_obj.write(i)
        file_obj.seek(0)
        tar = tarfile.open(mode='r', fileobj=file_obj)
        text = tar.extractfile(outfile)

        return send_file(text,
                         as_attachment=False,
                         mimetype='application/json'
                         if file_format == 'json' else 'text/csv')
    # Errors we want to re-raise
    except NoResultFound:
        raise
    except Exception as e:
        current_app.logger.exception(e)
        if created:
            try:
                container.stop()
            except:
                pass
Example #3
def get_top_manager_revenue(session, number_of_manager):
    """
    Function to perform a read operation against the database to find the top managers with the highest total revenue in a month

    :param session: The session to work with
    :type session: :class:`sqlalchemy.orm.session.Session`

    :param number_of_manager: The number of managers to be returned from the query
    :type number_of_manager: int

    :return: Nothing
    :rtype: None

    """
    try:
        if not issubclass(type(session), sqlalchemy.orm.session.Session):
            raise AttributeError(
                "session not passed correctly, should be of type 'sqlalchemy.orm.session.Session' "
            )

        if not issubclass(type(number_of_manager),
                          int) or number_of_manager < 1:
            raise AttributeError(
                "number of Managers should be integer and greater than 0")

        LOGGER.info("Performing Read Operation")

        # Create aliases for manager and employee, since both come from the same table and need to self-reference
        manager = aliased(models.EmployeeTable)
        employee = aliased(models.EmployeeTable)

        # Select the manager id, manager name, and total revenue, by summing all of the manager's invoice totals
        query = session.query(
            employee.reports_to.label("manager_id"),
            func.concat(manager.first_name, " ",
                        manager.last_name).label("manager_name"),
            func.sum(models.InvoiceTable.total).label("total_revenue"))

        # Join the customer table with the invoice table, and with the previously aliased employee and manager tables
        query = query.join(
            models.CustomerTable, models.InvoiceTable.customer_id ==
            models.CustomerTable.customer_id)
        query = query.join(
            employee,
            models.CustomerTable.support_rep_id == employee.employee_id)
        query = query.join(manager, employee.reports_to == manager.employee_id)

        # Keep only the invoices which occurred in month 8 of year 2012
        query = query.filter(
            extract('month', models.InvoiceTable.invoice_date) == 8,
            extract('year', models.InvoiceTable.invoice_date) == 2012)

        # Grouping by Manager Id
        query = query.group_by("manager_id")

        # Sorting By total revenue
        query = query.order_by(desc("total_revenue"))

        results = query.limit(number_of_manager).all()

        if not results:
            raise NoResultFound("No Records Found")

        LOGGER.info(
            "\n\nThe Top %s Managers with the Highest Revenue in Year: 2012 and Month: 08",
            number_of_manager)

        print("\n\n")
        print("===" * 50)
        print("\n\n")

        LOGGER.info(
            "\n\n %s",
            tabulate(results,
                     headers=["Manager ID", "Manager Name", "Total Revenue"],
                     tablefmt="grid"))

        print("\n\n")
        print("===" * 50)
        print("\n\n")
    except AttributeError as err:
        LOGGER.error(err)
    except NoResultFound as err:
        LOGGER.error(err)
    finally:
        session.close()
Example #4
def test_get_session_retry():
    """Test get_session_retry."""
    resp = get_session_retry()
    assert resp is not None


@patch("src.utils.update_osio_registered_repos", return_value=None)
def test_update_data_1(a):
    """Test update_data."""
    DatabaseIngestion.update_data(None)


@patch("src.utils.update_osio_registered_repos", side_effect=NoResultFound())
def test_update_data_2(a):
    """Test update_data."""
    with pytest.raises(Exception):
        DatabaseIngestion.update_data(None)
    a.side_effect = SQLAlchemyError()
    with pytest.raises(Exception):
        DatabaseIngestion.update_data(None)


def test_get_store_record():
    """Test get_store_record."""
    with pytest.raises(Exception):
        DatabaseIngestion.store_record({"test": "test"})

Example #5

def plot_oil_viscosities(settings):
    with transaction.manager:
        # -- Our loading routine --
        session = _get_db_session()

        if 'adios_id' not in settings:
            raise ValueError('adios_id setting is required.')
        adios_id = settings['adios_id']

        print('our session: %s' % session)
        try:
            oilobj = (session.query(Oil).join(ImportedRecord)
                      .filter(ImportedRecord.adios_oil_id == adios_id)
                      .one())
        except NoResultFound:
            raise NoResultFound('No Oil was found matching adios_id {0}'
                                .format(adios_id))

        if oilobj:
            print('Our oil object: %s' % oilobj)

            oil_props = OilProps(oilobj)
            print('\nOilProps:', oil_props)
            print(oil_props.kvis_at_temp())

            print('\nOur viscosities:')
            print([v for v in oilobj.kvis])

            print('\nOur unweathered viscosities (m^2/s, Kdegrees):')
            vis = [v for v in oilobj.kvis if v.weathering <= 0.0]
            print(vis)
            for i in [(v.m_2_s, v.ref_temp_k, v.weathering)
                      for v in vis]:
                print(i)

            x = np.array([v.ref_temp_k for v in vis]) - 273.15
            y = np.array([v.m_2_s for v in vis])
            xmin = x.min()
            xmax = x.max()
            xpadding = .5 if xmax == xmin else (xmax - xmin) * .3
            ymin = y.min()
            ymax = y.max()
            ypadding = (ymax / 2) if ymax == ymin else (ymax - ymin) * .3
            plt.plot(x, y, 'ro')
            plt.xlabel(r'Temperature ($^\circ$C)')
            plt.ylabel('Unweathered Kinematic Viscosity (m$^2$/s)')
            plt.yscale('log', subsy=[2, 3, 4, 5, 6, 7, 8, 9])
            plt.grid(True)
            plt.axis([xmin - xpadding, xmax + xpadding, 0, ymax + ypadding])

            # now we add the annotations
            for xx, yy in np.vstack((x, y)).transpose():
                print((xx, yy))
                if xx > x.mean():
                    xalign = -xpadding / 3
                else:
                    xalign = xpadding / 3
                yalign = ypadding / 3

                plt.annotate(r'(%s$^\circ$C, %s m$^2$/s)' % (xx, yy),
                             xy=(xx + (xalign / 10),
                                 yy + (yalign / 10)),
                             xytext=(xx + xalign, yy + yalign),
                             arrowprops=dict(facecolor='black',
                                             shrink=0.01),
                             fontsize=9)
            plt.show()
Example #6
    def exists_or_not_found(cls, model_id: int = None, **fields):

        if cls.is_exists(model_id, **fields):
            return

        raise NoResultFound({'error_message': 'No row was found by id'})
Example #7
def mocked_bad_query(table):
    raise NoResultFound()
Example #8
def render_type(search_word):
    results = {'search_word': ''}
    lorder = tornado.template.Loader(find_data_file('templates'))

    def replace(html):
        html = re.sub(
            r'<a href=showinfo:(\d*)>(.*)</a>',
            lambda m: link_to(m.group(2), '/type/' + m.group(1)),
            html
        )
        html = re.sub(r'<font color=".*">', '<font color="orange">', html)
        return html

    def update_results2description(found_lcid, found_type):
        if found_lcid != config.default_lcid:
            type_ = sessions[config.default_lcid].query(Type).filter_by(id=found_type.id).one()
        else:
            type_ = found_type

        name = type_.name
        content_kwargs = {
            'name': type_.name,
            'description': replace(type_.description),
            'locale_name': '',
            'locale_description': '',
            'lcid': config.lcid,
            'default_lcid': config.default_lcid,
        }
        footer_kwargs = {}
        parent = {
            'link': '/group/%d' % type_.group.id,
            'text': type_.group.name,
        }

        if config.lcid != config.default_lcid:
            try:
                locale_type = sessions[config.lcid].query(Type).filter_by(id=type_.id).one()
                name = '%s(%s)' % (locale_type.name, type_.name)
                content_kwargs['locale_name'] = locale_type.name
                content_kwargs['locale_description'] = replace(locale_type.description)
                parent['text'] = '%s(%s)' % (type_.group.name, locale_type.group.name)
            except:
                pass

        content_kwargs['parent'] = parent
        footer_kwargs['parent'] = parent
        footer_kwargs['id_text'] = 'Type ID is %d' % type_.id

        try:
            if 'en' in config.locales:
                if found_lcid == 'en':
                    en_type = found_type
                elif config.default_lcid == 'en':
                    en_type = type_
                else:
                    en_type = sessions['en'].query(Type).filter_by(id=type_.id).one()

                content_kwargs['uniwiki'] = en_type.name
        except:
            pass

        image_path = ['static', 'images', 'types', '%d.png' % type_.id]
        if os.path.isfile(find_data_file(os.path.join(*image_path))):
            content_kwargs['image'] = '/' + '/'.join(image_path)

        results['name'] = name
        results['search_word'] = type_.name
        content_html = lorder.load('description.html').generate(
            kwargs=update_template_kwargs(content_kwargs)
        )
        footer_html = lorder.load('footer.html').generate(
            kwargs=update_template_kwargs(footer_kwargs)
        )
        results['content'] = tornado.escape.to_basestring(content_html)
        results['footer'] = tornado.escape.to_basestring(footer_html)

        if config.paste_result:
            clipboard_thread.update(type_.name)

        return results

    def search(lcid):
        try:
            query = sessions[lcid].query(Type)
            if search_word.isdigit():
                types = query.filter_by(id=search_word)
            else:
                types = query.filter(Type.name.contains(search_word))

            update_results2description(lcid, types.one())
        except MultipleResultsFound:
            if types.count() > config.search_limit:
                raise MultipleResultsFound()
            else:
                html = ''
                groups = collections.OrderedDict()

                list_lorder = lorder.load('list.html')

                for type_ in types.all():
                    group = type_.group

                    if group not in groups:
                        groups[group] = []

                    if lcid == config.default_lcid:
                        groups[group].append(type_)
                    else:
                        default_type = sessions[config.default_lcid].query(Type) \
                                        .filter_by(id=type_.id).one()
                        groups[group].append(default_type)

                for group, types_ in groups.items():
                    name = group.name
                    try:
                        locale_group = sessions[config.lcid].query(Group) \
                                        .filter_by(id=group.id).one()
                        name = '%s(%s)' % (locale_group.name, group.name)
                    except:
                        pass

                    kwargs = {
                        'name': name,
                        'items': create_list_items(Type, types_, '/type/'),
                    }

                    html += tornado.escape.to_basestring(
                        list_lorder.generate(kwargs=update_template_kwargs(kwargs))
                    )
                    html += '<hr />'

                results['content'] = html
                results['name'] = search_word
                results['search_word'] = search_word
        except NoResultFound:
            return False

        return True

    search_lcids = [config.default_lcid, config.lcid]

    if 'en' in config.locales:
        search_lcids.insert(0, 'en')

    for search_lcid in sorted(set(search_lcids), key=search_lcids.index):
        if search(search_lcid):
            return results

    raise NoResultFound()
Example #9
def no_result_found():
    raise NoResultFound()
Example #10
def delete_by_id(self, id: int):
    if not self._filter_by_id(id).delete():
        raise NoResultFound()
    self.db.commit()
Example #11
def update_by_id(self, id: int, data: Dict):
    if not self._filter_by_id(id).update(data):
        raise NoResultFound()
    self.db.commit()
    return self.find_by_id(id)
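Both methods above lean on Query.delete() and Query.update() returning the number of matched rows, so a zero count is read as "no such id". A hedged sketch of a repository these methods could sit in; the class name, model wiring, and find_by_id are assumptions, not from the source:

from typing import Dict

from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound


class Repository:  # hypothetical host class for delete_by_id / update_by_id
    def __init__(self, db: Session, model):
        self.db = db
        self.model = model

    def _filter_by_id(self, id: int):
        return self.db.query(self.model).filter(self.model.id == id)

    def find_by_id(self, id: int):
        return self._filter_by_id(id).one()  # raises NoResultFound if missing

    def delete_by_id(self, id: int):
        # Query.delete() returns the number of rows matched by the filter
        if not self._filter_by_id(id).delete():
            raise NoResultFound()
        self.db.commit()

    def update_by_id(self, id: int, data: Dict):
        # Query.update() likewise returns the matched-row count
        if not self._filter_by_id(id).update(data):
            raise NoResultFound()
        self.db.commit()
        return self.find_by_id(id)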
Example #12
def perform_read_join(session, records):
    """
    Function to perform a read operation against the database

    :param session: The session to work with
    :type session: :class:`sqlalchemy.orm.session.Session`

    :param records: The number of records to return from the query
    :type records: int

    :return: Nothing
    :rtype: None
    """
    try:

        if not issubclass(type(session), sqlalchemy.orm.session.Session):
            raise AttributeError(
                "session not passed correctly, should be of type 'sqlalchemy.orm.session.Session' "
            )

        if not issubclass(type(records), int) or records < 1:
            raise AttributeError(
                "number of records should be integer and greater than 0")

        LOGGER.info("Performing Read Operation")

        # Selecting the Invoice Id, Customer Id, Invoice Date, Invoice Total, Customer Name, Employee Name,
        # Employee Title, Track Name
        query = session.query(
            models.InvoiceTable.invoice_id, models.InvoiceTable.customer_id,
            models.InvoiceTable.invoice_date, models.InvoiceTable.total,
            models.CustomerTable.first_name,
            models.EmployeeTable.first_name.label("Employee_Name"),
            models.EmployeeTable.title, models.TracksTable.name)

        # Joining Invoice Table, Customer Table, Invoice Line, Employee, Tracks Table
        query = query.join(
            models.CustomerTable, models.InvoiceTable.customer_id ==
            models.CustomerTable.customer_id)
        query = query.join(
            models.InvoiceLineTable, models.InvoiceTable.invoice_id ==
            models.InvoiceLineTable.invoice_id)
        query = query.join(
            models.EmployeeTable, models.CustomerTable.support_rep_id ==
            models.EmployeeTable.employee_id)
        query = query.join(
            models.TracksTable,
            models.InvoiceLineTable.track_id == models.TracksTable.track_id)

        # Sorting by Invoice Id
        query = query.order_by(models.InvoiceTable.invoice_id)

        results = query.limit(records).all()

        if not results:
            raise NoResultFound("No Records Found")

        LOGGER.info("\n\nThe %s Invoice Records Are", records)

        print("\n\n")
        print("====" * 50)
        print("\n\n")

        LOGGER.info(
            "\n\n %s",
            tabulate(results,
                     headers=[
                         "Invoice ID", "Customer ID", "Invoice Date",
                         "Invoice Total", "Customer Name", "Support Rep Name",
                         "Support Rep Title", "Track"
                     ],
                     tablefmt="grid"))

        print("\n\n")
        print("====" * 50)
        print("\n\n")
    except AttributeError as err:
        LOGGER.error(err)
    except NoResultFound as err:
        LOGGER.error(err)
    finally:
        session.close()
Example #13
    def post(self, workspace_name):
        """
        ---
          tags: ["Bulk"]
          description: Creates all faraday objects in bulk for a workspace
          requestBody:
            required: true
            content:
                application/json:
                    schema: BulkCreateSchema
          responses:
            201:
              description: Created
              content:
                application/json:
                  schema: BulkCreateSchema
            401:
               $ref: "#/components/responses/UnauthorizedError"
            403:
               description: Disabled workspace
            404:
               description: Workspace not found
        """
        from faraday.server.threads.reports_processor import REPORTS_QUEUE  # pylint: disable=import-outside-toplevel

        if flask_login.current_user.is_anonymous:
            agent = require_agent_token()
        data = self._parse_data(self._get_schema_instance({}), flask.request)
        json_data = flask.request.json
        if flask_login.current_user.is_anonymous:
            workspace = self._get_workspace(workspace_name)

            if not workspace or workspace not in agent.workspaces:
                flask.abort(404, f"No such workspace: {workspace_name}")

            if "execution_id" not in data:
                flask.abort(400, "argument expected: execution_id")

            execution_id = data["execution_id"]

            agent_execution: AgentExecution = AgentExecution.query.filter(
                AgentExecution.id == execution_id).one_or_none()

            if agent_execution is None:
                logger.exception(
                    NoResultFound(
                        f"No row was found for agent executor id {execution_id}"
                    ))
                flask.abort(400,
                            "Can not find an agent execution with that id")

            if workspace_name != agent_execution.workspace.name:
                logger.exception(
                    ValueError(
                        f"The {agent.name} agent has permission to workspace {workspace_name} and ask to write "
                        f"to workspace {agent_execution.workspace.name}"))
                flask.abort(400, "Trying to write to the incorrect workspace")

            params_data = agent_execution.parameters_data
            params = ', '.join(
                [f'{key}={value}' for (key, value) in params_data.items()])

            start_date = (data["command"].get("start_date") or agent_execution.command.start_date) \
                if "command" in data else agent_execution.command.start_date

            end_date = data["command"].get("end_date",
                                           None) if "command" in data else None

            data["command"] = {
                'id': agent_execution.command.id,
                'tool': agent.name,  # Agent name
                'command': agent_execution.executor.name,
                'user': '',
                'hostname': '',
                'params': params,
                'import_source': 'agent',
                'start_date': start_date
            }

            if end_date is not None:
                data["command"]["end_date"] = end_date

            command = Command.query.filter(
                Command.id == agent_execution.command.id).one_or_none()
            if command is None:
                logger.exception(
                    ValueError(
                        f"There is no command with {agent_execution.command.id}"
                    ))
                flask.abort(400, "Trying to update a not existent command")

            _update_command(command, data['command'])
            db.session.flush()
            if data['hosts']:
                json_data['command'] = data["command"]
                json_data['command']["start_date"] = data["command"][
                    "start_date"].isoformat()
                if 'end_date' in data["command"]:
                    json_data['command']["end_date"] = data["command"][
                        "end_date"].isoformat()

        else:
            workspace = self._get_workspace(workspace_name)
            command = Command(**(data['command']))
            command.workspace = workspace
            db.session.add(command)
            db.session.commit()
        if data['hosts']:
            # Create random file
            chars = string.ascii_uppercase + string.digits
            random_prefix = ''.join(random.choice(chars)
                                    for x in range(30))  # nosec
            json_file = f"{random_prefix}.json"
            file_path = CONST_FARADAY_HOME_PATH / 'uploaded_reports' \
                        / json_file
            with file_path.open('w') as output:
                json.dump(json_data, output)
            logger.info("Create tmp json file for bulk_create: %s", file_path)
            user_id = flask_login.current_user.id if not flask_login.current_user.is_anonymous else None
            REPORTS_QUEUE.put(
                (workspace.name, command.id, file_path, None, user_id))
        return flask.jsonify({
            "message":
            "Created",
            "command_id":
            None if command is None else command.id
        }), 201
Example #14
def __getitem__(self, key):
    try:
        return LibraryQuery(DBSession).get_library_by_id(key)
    except NoResultFound:
        raise NoResultFound('ERR_LIBRARY_NOT_EXIST')
Example #15
def get_delivered_project_asset(delivered_project_asset_id):
    try:
        return DeliveredProjectAsset.query.filter(DeliveredProjectAsset.id == delivered_project_asset_id).one()
    except NoResultFound as e:
        raise NoResultFound(f"delivered_project_asset with id {delivered_project_asset_id} doesn't exist") from e
def get_number_of_playlist_tracks(session, number_of_tracks):
    """
    Function to perform a read operation against the database to get the number of playlists a track has been added to

    :param session: The session to work with
    :type session: sqlalchemy.orm.session.Session

    :param number_of_tracks: The number of tracks to be returned from the query
    :type number_of_tracks: int

    :return: Nothing
    :rtype: None
    """
    try:
        if not issubclass(type(session), sqlalchemy.orm.session.Session):
            raise AttributeError(
                "session not passed correctly, should be of type 'sqlalchemy.orm.session.Session' "
            )

        if not issubclass(type(number_of_tracks), int) or number_of_tracks < 1:
            raise AttributeError(
                "number of tracks should be integer and greater than 0")

        LOGGER.info("Performing Read Operation")

        # Selecting the Track Id, Track Name, and Count of playlist IDs
        query = session.query(
            models.PlaylistTrackTable.track_id, models.TracksTable.name,
            func.count(models.PlaylistTrackTable.play_list_id).label(
                "number_of_playlist"))

        # Joining tracks table and playlisttrack table
        query = query.join(
            models.TracksTable,
            models.PlaylistTrackTable.track_id == models.TracksTable.track_id)

        # Grouping by Track Id
        query = query.group_by(models.PlaylistTrackTable.track_id)

        # Sorting by number_of_playlist and track id
        query = query.order_by(desc("number_of_playlist"),
                               models.PlaylistTrackTable.track_id)

        results = query.limit(number_of_tracks).all()

        if not results:
            raise NoResultFound("No Records Found")

        LOGGER.info(
            "\n\nThe Top %s Tracks, Based On Number Of Playlist It Is Added To",
            number_of_tracks)

        print("\n\n")
        print("===" * 50)
        print("\n\n")

        LOGGER.info(
            "\n\n %s",
            tabulate(results,
                     headers=["Track ID", "Track Name", "Number Of Playlist"],
                     tablefmt="grid"))

        print("\n\n")
        print("===" * 50)
        print("\n\n")
    except AttributeError as err:
        LOGGER.error(err)
    except NoResultFound as err:
        LOGGER.error(err)
    finally:
        session.close()
Example #17
def run():
    bucket = os.environ["BATCHPAR_s3_bucket"]
    abstract_file = os.environ["BATCHPAR_s3_key"]
    dupe_file = os.environ["BATCHPAR_dupe_file"]
    es_config = literal_eval(os.environ["BATCHPAR_outinfo"])
    db = os.environ["BATCHPAR_db"]
    entity_type = os.environ["BATCHPAR_entity_type"]

    # mysql setup
    engine = get_mysql_engine("BATCHPAR_config", "mysqldb", db)
    Session = sessionmaker(bind=engine)
    session = Session()

    # retrieve a batch of meshed terms
    mesh_terms = retrieve_mesh_terms(bucket, abstract_file)
    mesh_terms = format_mesh_terms(mesh_terms)
    logging.info(f'batch {abstract_file} contains '
                 f'{len(mesh_terms)} meshed abstracts')

    # retrieve duplicate map
    dupes = retrieve_duplicate_map(bucket, dupe_file)
    dupes = format_duplicate_map(dupes)
    
    # Set up elastic search connection
    field_null_mapping = load_json_from_pathstub("tier_1/"
                                                 "field_null_mappings/",
                                                 "health_scanner.json")
    es = ElasticsearchPlus(hosts=es_config['host'],
                           port=es_config['port'],
                           aws_auth_region=es_config['region'],
                           use_ssl=True,
                           entity_type=entity_type,
                           strans_kwargs=None,
                           field_null_mapping=field_null_mapping,
                           null_empty_str=True,
                           coordinates_as_floats=True,
                           country_detection=True,
                           listify_terms=True)
    all_es_ids = get_es_ids(es, es_config)

    docs = []
    for doc_id, terms in mesh_terms.items():
        if doc_id not in all_es_ids:
            continue
        try:
            _filter = Abstracts.application_id == doc_id
            abstract = (session.query(Abstracts)
                        .filter(_filter).one())
        except NoResultFound:
            logging.warning(f'Not found {doc_id} in database')
            raise NoResultFound(doc_id)
        clean_abstract_text = clean_abstract(abstract.abstract_text)
        docs.append({'doc_id': doc_id,
                     'terms_mesh_abstract': terms,
                     'textBody_abstract_project': clean_abstract_text
                     })
        duped_docs = dupes.get(doc_id, [])
        if len(duped_docs) > 0:
            logging.info(f'Found {len(duped_docs)} duplicates')
        for duped_doc in duped_docs:
            docs.append({'doc_id': duped_doc,
                         'terms_mesh_abstract': terms,
                         'textBody_abstract_project': clean_abstract_text,
                         'booleanFlag_duplicate_abstract': True
                         })
            
    # output to elasticsearch
    logging.warning(f'Writing {len(docs)} documents to elasticsearch')
    for doc in docs:
        uid = doc.pop("doc_id")
        # Extract existing info
        existing = es.get(es_config['index'], 
                          doc_type=es_config['type'], 
                          id=uid)['_source']
        # Merge existing info into new doc
        doc = {**existing, **doc}
        es.index(index=es_config['index'], 
                 doc_type=es_config['type'], id=uid, body=doc)
Example #18
def get_bidding(bidding_id):
    try:
        return Bidding.query.filter(Bidding.id == bidding_id).one()
    except NoResultFound as e:
        raise NoResultFound(f"Bidding with id {bidding_id} doesn't exist") from e
Example #19
def accept(cls, email):
    pending_user = UserModel.find_by_email(email)
    if not pending_user:
        raise NoResultFound(f"No user registered with email: {email}")
    pending_user.pending = False
    pending_user.save_to_db()
Example #20
def get_biddings_by_email(freelancer_email):
    try:
        return Bidding.query.filter(Bidding.freelancer_email == freelancer_email).all()
    except NoResultFound as e:
        raise NoResultFound(f"Biddings for freelancer email {freelancer_email} don't exist") from e
Example #21
    def __init__(
        self,
        session,
        selected_atoms,
        chianti_ions=None,
        kurucz_short_name="ku_latest",
        chianti_short_name="chianti_v8.0.2",
        nist_short_name="nist-asd",
        atom_masses_max_atomic_number=30,
        lines_loggf_threshold=-3,
        levels_metastable_loggf_threshold=-3,
        collisions_temperatures=None,
    ):

        self.session = session

        # Set the parameters for the dataframes
        self.atom_masses_param = {
            "max_atomic_number": atom_masses_max_atomic_number
        }

        self.levels_lines_param = {
            "levels_metastable_loggf_threshold":
            levels_metastable_loggf_threshold,
            "lines_loggf_threshold": lines_loggf_threshold
        }

        if collisions_temperatures is None:
            collisions_temperatures = np.arange(2000, 50000, 2000)
        else:
            collisions_temperatures = np.array(collisions_temperatures,
                                               dtype=np.int64)

        self.collisions_param = {"temperatures": collisions_temperatures}

        try:
            self.selected_atomic_numbers = list(
                map(int, parse_selected_atoms(selected_atoms)))
        except ParseException:
            raise ValueError(
                'Input is not a valid atoms string {}'.format(selected_atoms))

        if chianti_ions is not None:
            # Get a list of tuples (atomic_number, ion_charge) for the chianti ions

            try:
                self.chianti_ions = parse_selected_species(chianti_ions)
                self.chianti_ions = [
                    tuple(map(int, t)) for t in self.chianti_ions
                ]

            except ParseException:
                raise ValueError(
                    'Input is not a valid species string {}'.format(
                        chianti_ions))

            try:
                chianti_atomic_numbers = {
                    atomic_number
                    for atomic_number, ion_charge in self.chianti_ions
                }
                assert chianti_atomic_numbers.issubset(
                    set(self.selected_atomic_numbers))
            except AssertionError:
                raise ValueError(
                    "Chianti ions *must* be species of selected atoms!")
        else:
            self.chianti_ions = list()

        self._chianti_ions_table = None

        # Query the data sources
        self.ku_ds = None
        self.ch_ds = None
        self.nist_ds = None

        try:
            self.ku_ds = session.query(DataSource).filter(
                DataSource.short_name == kurucz_short_name).one()
        except NoResultFound:
            raise NoResultFound("Kurucz data source is not found!")

        try:
            self.nist_ds = session.query(DataSource).filter(
                DataSource.short_name == nist_short_name).one()
        except NoResultFound:
            raise NoResultFound("NIST ASD data source is not found!")

        if self.chianti_ions:
            try:
                self.ch_ds = session.query(DataSource).filter(
                    DataSource.short_name == chianti_short_name).one()
            except NoResultFound:
                raise NoResultFound("Chianti data source is not found!")

        self._atom_masses = None
        self._ionization_energies = None
        self._levels = None
        self._lines = None
        self._collisions = None
        self._macro_atom = None
        self._macro_atom_references = None
        self._zeta_data = None
Example #22
def test_govdelivery_callback_returns_200(
    client,
    mock_dao_get_notification_by_reference,
    mock_update_notification_status,
):
    response = post(client, get_govdelivery_request("123456", "sent"))

    assert response.status_code == 200


@pytest.mark.parametrize("exception, exception_name",
                         [(MultipleResultsFound(), 'MultipleResultsFound'),
                          (NoResultFound(), 'NoResultFound')])
def test_govdelivery_callback_always_returns_200_after_expected_exceptions(
        client, mock_dao_get_notification_by_reference, mock_statsd, exception,
        exception_name):
    mock_dao_get_notification_by_reference.side_effect = exception

    response = post(client, get_govdelivery_request("123456", "sent"))
    mock_statsd.incr.assert_called_with(
        f'callback.govdelivery.failure.{exception_name}')

    assert response.status_code == 200


def test_govdelivery_callback_raises_invalid_request_if_missing_data(client):
    response = post(client, {"not-the-right-key": "foo"})
Example #23
def get_top_employee_sales(session, number_of_employee):
    """
    Function to perform a read operation against the database to find the top employees with the most sales in a month

    :param session: The session to work with
    :type session: :class:`sqlalchemy.orm.session.Session`

    :param number_of_employee: The number of employees to be returned from the query
    :type number_of_employee: int

    :return: Nothing
    :rtype: None
    """
    try:
        if not issubclass(type(session), sqlalchemy.orm.session.Session):
            raise AttributeError(
                "session not passed correctly, should be of type 'sqlalchemy.orm.session.Session' "
            )

        if not issubclass(type(number_of_employee),
                          int) or number_of_employee < 1:
            raise AttributeError(
                "number of Employee should be integer and greater than 0")

        LOGGER.info("Performing Read Operation")

        # Selecting the Employee Id, Employee Name, and Total Sales
        query = session.query(
            models.CustomerTable.support_rep_id.label("employee_id"),
            func.concat(models.EmployeeTable.first_name, " ",
                        models.EmployeeTable.last_name).label("name"),
            func.count(models.InvoiceTable.invoice_id).label("total_sales"))

        # Joining Invoice, customer and employee Table
        query = query.join(
            models.CustomerTable, models.InvoiceTable.customer_id ==
            models.CustomerTable.customer_id)
        query = query.join(
            models.EmployeeTable, models.CustomerTable.support_rep_id ==
            models.EmployeeTable.employee_id)

        # Filtering the result For given year and month
        query = query.filter(
            extract('month', models.InvoiceTable.invoice_date) == 8,
            extract('year', models.InvoiceTable.invoice_date) == 2012)

        # Grouping by Employee Id
        query = query.group_by(models.CustomerTable.support_rep_id)

        # Sorting by total_sales and employee id
        query = query.order_by(desc("total_sales"),
                               models.CustomerTable.support_rep_id)

        results = query.limit(number_of_employee).all()

        if not results:
            raise NoResultFound("No Records Found")

        LOGGER.info(
            "\n\nThe Top %s Employee with Most Sales in Year: 2012 and Month: 08",
            number_of_employee)

        print("\n\n")
        print("===" * 50)
        print("\n\n")

        LOGGER.info(
            "\n\n %s",
            tabulate(results,
                     headers=["Employee ID", "Employee Name", "Total Sales"],
                     tablefmt="grid"))

        print("\n\n")
        print("===" * 50)
        print("\n\n")
    except AttributeError as err:
        LOGGER.error(err)
    except NoResultFound as err:
        LOGGER.error(err)
    finally:
        session.close()
Example #24
def get_entity_from_tag(tag: Union[str, KLAP4_TAG, PLAYLIST_TAG]) -> SQLBase:
    # If tag is a string, turn it into a named tuple
    if isinstance(tag, str):
        tag = decompose_tag(tag)

    entity = None

    try:
        from klap4.db import Session
        session = Session()
        if isinstance(tag, KLAP4_TAG) and tag.genre_abbr is not None:
            entity = session.query(Genre) \
                .filter(Genre.abbreviation == tag.genre_abbr) \
                .one()

            if tag.artist_num is not None and entity is not None:
                entity = session.query(Artist) \
                    .join(
                        Genre, and_(
                            Genre.id == Artist.genre_id,
                            Genre.abbreviation == tag.genre_abbr
                        )
                    ) \
                    .filter(
                            Artist.number == tag.artist_num
                    ) \
                    .one()

                if entity is not None and tag.album_letter is not None:
                    entity = session.query(Album) \
                        .join(
                            Artist, and_(
                                Artist.id == Album.artist_id,
                                Artist.number == tag.artist_num
                            )
                        ) \
                        .join(
                            Genre, and_(
                                Genre.id == Artist.genre_id,
                                Genre.abbreviation == tag.genre_abbr
                            )
                        ) \
                        .filter(
                                Album.letter == tag.album_letter
                        ) \
                        .one()

                    if entity is not None and tag.song_num is not None:
                        entity = session.query(Song) \
                            .filter(
                                and_(
                                    Song.genre_abbr == tag.genre_abbr,
                                    Song.artist_num == tag.artist_num,
                                    Song.album_letter == tag.album_letter,
                                    Song.number == tag.song_num
                                )
                            ) \
                            .one()
                    elif entity is not None and tag.album_review_dj_id is not None:
                        entity = session.query(AlbumReview) \
                            .filter(
                                and_(
                                    AlbumReview.genre_abbr == tag.genre_abbr,
                                    AlbumReview.artist_num == tag.artist_num,
                                    AlbumReview.album_letter == tag.album_letter,
                                    AlbumReview.dj_id == tag.album_review_dj_id
                                )
                            ) \
                            .one()
        elif isinstance(tag, PLAYLIST_TAG) and tag.dj_id is not None:
            entity = session.query(DJ) \
                .filter(DJ.id == tag.dj_id) \
                .one()

            if tag.name is not None:
                entity = session.query(Playlist) \
                    .filter(
                        and_(
                            Playlist.dj_id == tag.dj_id,
                            Playlist.name == tag.name
                        )
                    ) \
                    .one()

                if tag.song_num is not None:
                    entity = session.query(PlaylistEntry) \
                        .filter(
                            and_(
                                PlaylistEntry.dj_id == tag.dj_id,
                                PlaylistEntry.playlist_name == tag.name,
                                PlaylistEntry.index == tag.song_num
                            )
                        ) \
                        .one()
    except NoResultFound as e:
        tag_str = ''.join([str(d) if d is not None else '' for d in tag])
        raise NoResultFound(f"No tag found: '{tag_str}'") from e

    return entity
Example #25
    def onMessage(self, payload, is_binary):
        """
            We only support JOIN and LEAVE workspace messages.
            When authentication is implemented we need to verify
            that the user can join the selected workspace.
            When authentication is implemented we need to reply
            to the client if the join failed.
        """
        from faraday.server.web import get_app  # pylint:disable=import-outside-toplevel
        if not is_binary:
            message = json.loads(payload)
            if message['action'] == 'JOIN_WORKSPACE':
                if 'workspace' not in message or 'token' not in message:
                    logger.warning(f'Invalid join workspace message: {message}')
                    self.sendClose()
                    return
                signer = itsdangerous.TimestampSigner(get_app().config['SECRET_KEY'],
                                                      salt="websocket")
                try:
                    workspace_id = signer.unsign(message['token'], max_age=60)
                except itsdangerous.BadData as e:
                    self.sendClose()
                    logger.warning('Invalid websocket token for workspace '
                                   '{}'.format(message['workspace']))
                    logger.exception(e)
                else:
                    with get_app().app_context():
                        workspace = Workspace.query.get(int(workspace_id))
                    if workspace.name != message['workspace']:
                        logger.warning(
                            'Trying to join workspace {} with token of '
                            'workspace {}. Rejecting.'.format(
                                message['workspace'], workspace.name
                            ))
                        self.sendClose()
                    else:
                        self.factory.join_workspace(
                            self, message['workspace'])
            if message['action'] == 'LEAVE_WORKSPACE':
                self.factory.leave_workspace(self, message['workspace'])
            if message['action'] == 'JOIN_AGENT':
                if 'token' not in message or 'executors' not in message:
                    logger.warning("Invalid agent join message")
                    self.sendClose(1000, reason="Invalid JOIN_AGENT message")
                    return False
                with get_app().app_context():
                    try:
                        agent = decode_agent_websocket_token(message['token'])
                        update_executors(agent, message['executors'])
                    except ValueError:
                        logger.warning('Invalid agent token!')
                        self.sendClose(1000, reason="Invalid agent token!")
                        return False
                    # factory will now send broadcast messages to the agent
                    return self.factory.join_agent(self, agent)
            if message['action'] == 'LEAVE_AGENT':
                with get_app().app_context():
                    (agent_id,) = [
                        k
                        for (k, v) in connected_agents.items()
                        if v == self
                    ]
                    agent = Agent.query.get(agent_id)
                    assert agent is not None  # TODO the agent could be deleted here
                return self.factory.leave_agent(self, agent)
            if message['action'] == 'RUN_STATUS':
                with get_app().app_context():
                    if 'executor_name' not in message:
                        logger.warning(f'Missing executor_name param in message: {message}')
                        return True

                    (agent_id,) = [
                        k
                        for (k, v) in connected_agents.items()
                        if v == self
                    ]
                    agent = Agent.query.get(agent_id)
                    assert agent is not None  # TODO the agent could be deleted here

                    execution_id = message.get('execution_id', None)
                    assert execution_id is not None
                    agent_execution = AgentExecution.query.filter(AgentExecution.id == execution_id).first()
                    if agent_execution:

                        if agent_execution.workspace.name not in \
                                [
                                    workspace.name
                                    for workspace in agent.workspaces
                                ]:
                            logger.exception(
                                ValueError(
                                    f"The {agent.name} agent has permission "
                                    f"to workspace {agent.workspaces} and "
                                    "ask to write to workspace "
                                    f"{agent_execution.workspace.name}"
                                )
                            )
                        else:
                            agent_execution.successful = message.get('successful', None)
                            agent_execution.running = message.get('running', None)
                            agent_execution.message = message.get('message', '')
                            db.session.commit()
                    else:
                        logger.exception(
                            NoResultFound(f"No row was found for agent executor id {execution_id}"))
Example #26
def get_tracks_with_more_genre(session, number_of_tracks):
    """
    Function to perform a read operation against the database to get tracks with more than one genre

    :param session: The session to work with
    :type session: sqlalchemy.orm.session.Session

    :param number_of_tracks: The number of tracks to be returned from the query
    :type number_of_tracks: int

    :return: Nothing
    :rtype: None

    """
    try:

        if not issubclass(type(session), sqlalchemy.orm.session.Session):
            raise AttributeError(
                "session not passed correctly, should be of type 'sqlalchemy.orm.session.Session' "
            )

        if not issubclass(type(number_of_tracks), int) or number_of_tracks < 1:
            raise AttributeError(
                "number of tracks should be integer and greater than 0")

        LOGGER.info("Performing Read Operation")

        # Create a subquery that returns the names of tracks associated with more than one genre
        stmt = session.query(models.TracksTable.name).group_by(
            models.TracksTable.name)
        stmt = stmt.having(
            func.count(distinct(models.TracksTable.genre_id)) > 1).subquery()

        # Selecting the Track Name, Genre Name
        query = session.query(
            distinct(models.TracksTable.name).label("track_name"),
            models.GenreTable.name.label("genre_name"))

        # Filtering the query to return only tracks that are returned from the subquery
        query = query.filter(models.TracksTable.name.in_(stmt))

        # Joining tracks table and genre table
        query = query.join(
            models.GenreTable,
            models.TracksTable.genre_id == models.GenreTable.genre_id)

        # Sorting by Tracks Name
        query = query.order_by(models.TracksTable.name)

        results = query.limit(number_of_tracks).all()

        if not results:
            raise NoResultFound("No Records Found")

        LOGGER.info("\n\n%s Tracks, That Are Part Of More Than One Genre",
                    number_of_tracks)

        print("\n\n")
        print("===" * 50)
        print("\n\n")

        LOGGER.info(
            "\n\n %s",
            tabulate(results,
                     headers=["Track Name", "Genre Name"],
                     tablefmt="grid"))

        print("\n\n")
        print("===" * 50)
        print("\n\n")
    except AttributeError as err:
        LOGGER.error(err)
    except NoResultFound as err:
        LOGGER.error(err)
    finally:
        session.close()
Example #27
def get_one(cls, oid, is_deleted=None):
    obj = cls.get(oid, is_deleted=is_deleted)
    if obj is None:
        raise NoResultFound()
    return obj
Example #28
    def get_by_id(self,
                  info_role,
                  id_export,
                  with_data=False,
                  filters=None,
                  limit=1000,
                  offset=0,
                  export_format=None):
        """
            Fonction qui retourne les données pour un export données

        .. :quickref: retourne les données pour un export données


        :query {} info_role: Role ayant demandé l'export
        :query int id_export: Identifiant de l'export
        :query boolean with_data: Indique si oui ou non la fonction
                retourne les données associées à l'export.
                Si non retourne la définition de l'export
        :query {} filters: Filtres à appliquer sur les données
        :query int limit: Nombre maximum de données à retourner
        :query int offset: Numéro de page à retourner
        :query str export_format: format de l'export (csv, json, shp)

        **Returns:**

        .. sourcecode:: http

            {
                'total': Number total of results,
                'total_filtered': Number of results after filteer ,
                'page': Page number,
                'limit': Limit,
                'items': data on GeoJson format
                'licence': information of licence associated to data
            }


        """
        result = None
        end_time = None
        log = None
        exc = None
        status = -2
        start_time = datetime.utcnow()
        if not filters:
            filters = dict()

        try:
            # Check whether the export is allowed
            try:
                self.get_export_is_allowed(id_export, info_role)
            except (NoResultFound) as exp:
                LOGGER.warn('repository.get_by_id(): %s', str(exp))
                exc = exp
                raise

            # Fetch the export
            export_ = Export.query.filter_by(id=id_export).one()

            if not with_data or not export_format:
                return export_.as_dict(True)

            geometry = (export_.geometry_field if
                        (hasattr(export_, 'geometry_field')
                         and current_app.config['export_format_map']
                         [export_format]['geofeature']) else None)

            columns, data = self._get_data(export_,
                                           geom_column_header=geometry,
                                           filters=filters,
                                           limit=limit,
                                           offset=offset)

            if len(data.get('items')) == 0:
                raise EmptyDataSetError(
                    'Empty dataset for export id {} with id_role {}.'.format(
                        id_export, info_role.id_role))

            status = 0
            result = (export_.as_dict(True), columns, data)

        except (InsufficientRightsError, NoResultFound,
                EmptyDataSetError) as e:
            LOGGER.warn('repository.get_by_id(): %s', str(e))
            exc = e
            raise
        except Exception as e:
            exc = e
            LOGGER.critical('exception: %s', e)
            raise
        finally:
            end_time = datetime.utcnow()
            if exc:
                exp_tb = sys.exc_info()
                if (isinstance(exc, InsufficientRightsError)
                        or isinstance(exc, EmptyDataSetError)):
                    raise
                elif isinstance(exc, NoResultFound):
                    raise NoResultFound(
                        'Unknown export id {}.'.format(id_export))
                else:
                    log = str(exp_tb)
                    status = -1

            ExportLog.record({
                'id_role': info_role.id_role,
                'id_export': export_.id,
                'export_format': export_format,
                'start_time': start_time,
                'end_time': end_time,
                'status': status,
                'log': log
            })

            if status != 0 or exc:
                LOGGER.critical('export error: %s', exp_tb)
                raise
            else:
                return result
Example #29
def raise_not_found():
    raise NoResultFound()
Example #30
    async def one(self):  # type: ignore

        result = await self.first()
        if result is None:
            raise NoResultFound("No row was found for one()")
        return result
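A hedged usage sketch for this async one(); the query object and its filter_by() API are assumptions, only the one() method above is from the source:

from sqlalchemy.orm.exc import NoResultFound


async def load_row(query, row_id):
    # `query` is assumed to expose the async first()/one() API sketched
    # above; NoResultFound signals an empty result set
    try:
        return await query.filter_by(id=row_id).one()
    except NoResultFound:
        return None  # caller decides how to handle the missing row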