Example #1
def make_chunks(physiological_file_id, config_file, verbose):
    """
    Call the function create_chunks_for_visualization of the Physiological class on
    the PhysiologicalFileID provided as argument to this function.

    :param physiological_file_id: PhysiologicalFileID of the file to chunk
     :type physiological_file_id: int
    :param config_file: path to the config file with database connection information
     :type config_file: str
    :param verbose    : flag for more printing if set
     :type verbose    : bool
    """

    # database connection
    db = Database(config_file.mysql, verbose)
    db.connect()

    # grep config settings from the Config module
    data_dir = db.get_config('dataDirBasepath')

    # making sure that there is a final / in data_dir
    data_dir = data_dir if data_dir.endswith('/') else data_dir + "/"

    # load the Physiological object
    physiological = Physiological(db, verbose)

    # create the chunked dataset
    if physiological.grep_file_path_from_file_id(physiological_file_id):
        print('Chunking physiological file ID ' + str(physiological_file_id))
        physiological.create_chunks_for_visualization(physiological_file_id,
                                                      data_dir)
Example #2
def vul_scan(domain, now_time):
    datas = []
    database = Database(
        os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'srcscan.db'))
    database.connect()
    database.init()
    logger.sysinfo("Scaning vul for: %s " % (domain))
    for _row in database.select_mondomain(domain):
        data = {
            "subdomain": _row[0],
            "url": _row[1],
            "title": _row[2],
            "status": _row[3],
            "len": _row[4],
            "update_time": _row[5],
            "domain": _row[6]
        }
        datas.append(data)

    for data in datas:
        if data['status'] != 0:
            logger.sysinfo("Scaning vul for %s." % (data['url']))
            crawlergo_scan(data['url'], data['domain'], now_time, database)

    logger.sysinfo("Scaned vul for: %s " % (domain))
    database.disconnect()
Example #3
def subdomain_scan(domain, ret, now_time):
    database = Database(
        os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'srcscan.db'))
    database.connect()
    database.init()
    logger.sysinfo("Scanning domain %s." % domain)
    _engines = [_(domain) for _ in engines.values()]
    loop = asyncio.get_event_loop()
    if debug:
        loop.set_debug(True)
    for task in [asyncio.ensure_future(_engine.run()) for _engine in _engines]:
        loop.run_until_complete(task)
    # loop.close()

    for _engine in _engines:
        logger.sysinfo("{engine} Found {num} sites".format(
            engine=_engine.engine_name, num=len(_engine.results['subdomain'])))
        ret.update(_engine.results['subdomain'])
    logger.sysinfo("Found %d subdomains of %s." % (len(ret), domain))
    for subdomain in ret:
        database.insert_subdomain(subdomain, None, None, 0, 0, now_time,
                                  domain)
    database.disconnect()
    return ret
Example #4
def title_scan(domain, ret, now_time):
    ret = list(ret)
    database = Database(
        os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'srcscan.db'))
    database.connect()
    database.init()
    logger.sysinfo('Checking %d subdomains of %s.' % (len(ret), domain))
    loop = asyncio.get_event_loop()
    thread_num = int(conf['config']['basic']['thread_num'])
    thread_num = thread_num if len(ret) > thread_num else len(ret)
    tasks = []
    for i in range(0, thread_num):
        tasks.append(
            asyncio.ensure_future(
                get_title([ret[x]
                           for x in range(0 + i, len(ret), thread_num)])))
    loop.run_until_complete(asyncio.wait(tasks))
    for task in tasks:
        for subdomain, url, title, status, content_length in task.result():
            database.update_subdomain_status(subdomain, url, title, status,
                                             content_length, now_time)
    database.disconnect()
    logger.sysinfo("Checked subdomains' status of %s." % domain)
Example #5
def test_installer_simple():
    silentremove("/tmp/simple.meta")

    error_code = main([SIMPLE, "/tmp/simple.meta"])

    assert error_code == 0

    with open("/tmp/simple.meta") as metadata:
        reader = csv.reader(metadata)
        n_tables = 0
        products_table_exists = 0
        for row in reader:
            n_tables += 1
            if row[1] == 'Products':
                products_table_exists += 1
                assert "xmin" in row[2]
                assert "productDescription" in row[2]
        assert n_tables == 8
        assert products_table_exists == 1

    # check solr
    s = Solr()
    res = s.list()
    assert res["hits"] == 8
    assert len(res["docs"]) == 8

    # check database
    db = Database()
    db.connect()
    cursor = db.cursor()
    cursor.execute("SELECT * FROM documents ORDER BY ID")
    rows = cursor.fetchall()
    assert len(rows) == 8
    assert rows[0]["universal_id"] == "ClassicModels.public.OrderDetails"
    assert rows[1]["universal_id"] == "ClassicModels.public.Offices"
    assert rows[2]["universal_id"] == "ClassicModels.public.Payments"
    assert rows[3]["universal_id"] == "ClassicModels.public.ProductLines"
    assert rows[4]["universal_id"] == "ClassicModels.public.Customers"
    assert rows[5]["universal_id"] == "ClassicModels.public.Orders"
    assert rows[6]["universal_id"] == "ClassicModels.public.Employees"
    assert rows[7]["universal_id"] == "ClassicModels.public.Products"

    cursor.execute("SELECT * FROM filters ORDER BY ID")
    rows = cursor.fetchall()
    assert len(rows) == 2
    assert rows[0]["name"] == "ClassicModels"
    assert rows[1]["name"] == "public"
    assert rows[0]["id"] == rows[1]["parent_id"]
Example #6
    def insert_or_update(cls, info):
        if not info:
            print("Nothing to write...")
            return None

        table_name = cls.TABLE_NAME
        keys = ", ".join(info.keys())
        values = ", ".join(["%s"] * len(info))
        update_keys = ", ".join(map(lambda x: f"{x}= excluded.{x}", info.keys()))

        conn = None
        cursor = None
        query = (
            f"INSERT INTO {table_name} ({keys}) VALUES ({values}) "
            f"ON CONFLICT (slack_id) DO UPDATE "
            f"SET {update_keys}"
        )

        try:
            conn = Database.connect()
            cursor = conn.cursor()
            cursor.execute(query, list(info.values()))
        except Exception as e:
            # TODO: Change print to logger
            print(e)
            return None
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.commit()
                conn.close()
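To make the dynamically built upsert easier to read, the sketch below prints the query string the method above would generate for a sample dict, assuming a hypothetical users table with a unique slack_id column (illustrative values, not from the source project); the actual values are passed separately to cursor.execute as parameters.

info = {"slack_id": "U012345", "name": "Ada", "score": 10}
keys = ", ".join(info.keys())
values = ", ".join(["%s"] * len(info))
update_keys = ", ".join(f"{k} = excluded.{k}" for k in info.keys())
print(f"INSERT INTO users ({keys}) VALUES ({values}) "
      f"ON CONFLICT (slack_id) DO UPDATE SET {update_keys}")
# INSERT INTO users (slack_id, name, score) VALUES (%s, %s, %s)
#   ON CONFLICT (slack_id) DO UPDATE SET slack_id = excluded.slack_id,
#   name = excluded.name, score = excluded.score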
Example #7
    def insert(cls, message, match):
        if not message:
            print("No message to write...")
            return None

        data = {
            "message_id": message.id or -1,
            "text": message.text,
            "room": message.room,
            "user_id": message.user.id,
            "match": "t" if match else "f",
        }

        table_name = cls.TABLE_NAME
        keys = ", ".join(data.keys())
        values = ", ".join(["%s"] * len(data))

        conn = None
        cursor = None
        query = f"INSERT INTO {table_name} ({keys}) VALUES ({values})"

        try:
            conn = Database.connect()
            cursor = conn.cursor()
            cursor.execute(query, list(data.values()))
        except Exception as e:
            # TODO: Change print to logger
            print(e)
            return None
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.commit()
                conn.close()
Example #8
def server_loop():
    global bind_ip
    global bind_port
    global threads

    db = Database()
    db.connect()
    #db.init()
    db.commit()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((bind_ip, bind_port))
    s.listen(20)
    while True:
        connection, address = s.accept()
        t = threading.Thread(target=handle_client, args=(connection, db))
        threads.append(t)
        t.setDaemon(True)
        t.start()
    db.disconnect()
    return
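handle_client is referenced but not defined in this example. A minimal hypothetical handler sketch (names and the one-request protocol are assumptions, not taken from the source project):

def handle_client(connection, db):
    # read one request from the client, consult the shared db handle, then reply
    try:
        request = connection.recv(1024).decode(errors="replace")
        # ... query db using `request` and build a real response here ...
        connection.sendall(b"OK\n")
    finally:
        connection.close()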
Example #10
def save(domains, path, filename, key):
    datas = []
    database = Database(
        os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'srcscan.db'))
    database.connect()
    database.init()
    for domain in domains:
        for _row in database.select_mondomain(domain):
            data = {
                "subdomain": _row[0],
                "url": _row[1],
                "title": _row[2],
                "status": _row[3],
                "len": _row[4],
                "update_time": _row[5],
                "domain": _row[6]
            }
            datas.append(data)

    tocsv(datas, path, filename, key)

    database.disconnect()
Example #11
def read_and_insert_bids(bids_dir, config_file, verbose, createcand,
                         createvisit):
    """
    Read the provided BIDS structure and import it into the database.

    :param bids_dir   : path to the BIDS directory
     :type bids_dir   : str
    :param config_file: path to the config file with database connection information
     :type config_file: str
    :param verbose    : flag for more printing if set
     :type verbose    : bool
    :param createcand : allow database candidate creation if it did not exist already
     :type createcand : bool
    :param createvisit: allow database visit creation if it did not exist already
     :type createvisit: bool
    """

    # database connection
    db = Database(config_file.mysql, verbose)
    db.connect()

    # grep config settings from the Config module
    default_bids_vl = db.get_config('default_bids_vl')
    data_dir = db.get_config('dataDirBasepath')

    # making sure that there is a final / in data_dir
    data_dir = data_dir if data_dir.endswith('/') else data_dir + "/"

    # load the BIDS directory
    bids_reader = BidsReader(bids_dir, verbose)
    if not bids_reader.participants_info          \
            or not bids_reader.cand_sessions_list \
            or not bids_reader.cand_session_modalities_list:
        message = '\n\tERROR: could not properly parse the following ' \
                  'BIDS directory: ' + bids_dir + '\n'
        print(message)
        sys.exit(lib.exitcode.UNREADABLE_FILE)

    # create the LORIS_BIDS directory in data_dir based on Name and BIDS version
    loris_bids_root_dir = create_loris_bids_directory(bids_reader, data_dir,
                                                      verbose)

    # loop through subjects
    for bids_subject_info in bids_reader.participants_info:

        # greps BIDS information for the candidate
        bids_id = bids_subject_info['participant_id']
        bids_sessions = bids_reader.cand_sessions_list[bids_id]

        # greps BIDS candidate's info from LORIS (creates the candidate if it
        # does not exist yet in LORIS and the createcand flag is set to true)
        loris_cand_info = grep_or_create_candidate_db_info(
            bids_reader, bids_id, db, createcand, loris_bids_root_dir, verbose)
        cand_id = loris_cand_info['CandID']
        center_id = loris_cand_info['RegistrationCenterID']

        # greps BIDS session's info for the candidate from LORIS (creates the
        # session if it does not exist yet in LORIS and the createvisit is set
        # to true. If no visit in BIDS structure, then use default visit_label
        # stored in the Config module)
        loris_sessions_info = grep_candidate_sessions_info(
            bids_sessions, bids_id, cand_id, loris_bids_root_dir, createvisit,
            verbose, db, default_bids_vl, center_id)

    # read list of modalities per session / candidate and register data
    for row in bids_reader.cand_session_modalities_list:
        bids_session = row['bids_ses_id']
        visit_label = bids_session if bids_session else default_bids_vl
        loris_bids_visit_rel_dir = 'sub-' + row[
            'bids_sub_id'] + '/' + 'ses-' + visit_label
        for modality in row['modalities']:
            loris_bids_modality_rel_dir = loris_bids_visit_rel_dir + '/' + modality + '/'
            lib.utilities.create_dir(
                loris_bids_root_dir + loris_bids_modality_rel_dir, verbose)

            if modality == 'eeg':
                Eeg(bids_reader=bids_reader,
                    bids_sub_id=row['bids_sub_id'],
                    bids_ses_id=row['bids_ses_id'],
                    bids_modality=modality,
                    db=db,
                    verbose=verbose,
                    data_dir=data_dir,
                    default_visit_label=default_bids_vl,
                    loris_bids_eeg_rel_dir=loris_bids_modality_rel_dir,
                    loris_bids_root_dir=loris_bids_root_dir)

            elif modality in ['anat', 'dwi', 'fmap', 'func']:
                Mri(bids_reader=bids_reader,
                    bids_sub_id=row['bids_sub_id'],
                    bids_ses_id=row['bids_ses_id'],
                    bids_modality=modality,
                    db=db,
                    verbose=verbose,
                    data_dir=data_dir,
                    default_visit_label=default_bids_vl,
                    loris_bids_mri_rel_dir=loris_bids_modality_rel_dir,
                    loris_bids_root_dir=loris_bids_root_dir)

    # disconnect from the database
    db.disconnect()
Example #12
File: run.py Project: aesmin/usagi
def main(args):
    if len(args) != 2:
        return 1
    ds_config, meta_data_file = args

    if not os.path.exists(ds_config):
        print("%s does not exist" % ds_config, file=sys.stderr)
        return 1

    if os.path.exists(meta_data_file):
        print("%s already exists, please remove it first" % meta_data_file,
              file=sys.stderr)
        return 1

    if not os.path.exists(os.path.dirname(meta_data_file)):
        print("dir %s does not exist, please create it first"
              % os.path.dirname(meta_data_file), file=sys.stderr)
        return 1

    sc = SearchConfiguration(ds_config)
    sc.get_search_config()
    sc.parse()

    for handle in sc.handles:
        handle.connect()
        handle.copy_raw_meta_data(meta_data_file, append=True)
        handle.close()

    # meta to database
    doc = Document()
    doc.clear()
    with open(meta_data_file) as metadata:
        reader = csv.reader(metadata)
        for row in reader:
            if len(row) > 3:
                doc.create(row[0], row[1], row[2], row[3])
            else:
                doc.create(row[0], row[1], row[2])

    doc.update_filters()
    doc.close()

    # database to solr
    s = Solr()
    db = Database()
    db.connect()
    cursor = db.cursor()
    cursor.execute("SELECT * FROM documents")
    datalist = []
    for row in cursor.fetchall():
        datalist.append({
            "universal_id_s": row["universal_id"],
            "title_s": row["title"],
            "all_txt_ng": row["keywords"],
            "path_s": row["path"],
        })
    cursor.close()
    db.close()
    s.solr().delete(q="*:*")
    s.solr().add(datalist)

    return 0
Example #13
File: word.py Project: mrfhitz/Keylogger
    def __init__(self, computerName, cfg_file):
        self.__conn = Database.connect(cfg_file)
        self.__computerName = computerName
        self.__cursor = self.__conn.cursor()
Example #14
def validate_dicom_archive(config_file, tarchive_path, upload_id, verbose):
    """
    Performs the different DICOM archive validations. This includes:
      - Verification of the DICOM archive against the checksum stored in the database
      - Verification of the PSC information using either PatientName or PatientID DICOM header
      - Verification of the ScannerID in the database (optionally create a new one if necessary)
      - Validity check of the candidate IDs associated with the DICOM archive based on the PatientName
      - Validity check of the session associated with the DICOM archive based on the PatientName
      - Update of the mri_upload's isTarchiveValidated field if the above validations were successful

    :param config_file         : path to the config file with MySQL credentials
     :type config_file         : str
    :param tarchive_path       : path to the DICOM archive to validate against the database entries
     :type tarchive_path       : str
    :param upload_id           : UploadID associated with the DICOM archive
     :type upload_id           : int
    :param verbose             : be verbose
     :type verbose             : bool
    """

    # ----------------------------------------------------
    # establish database connection
    # ----------------------------------------------------
    db = Database(config_file.mysql, verbose)
    db.connect()

    # -----------------------------------------------------------------------------------
    # load the Config, Imaging, Tarchive, MriUpload, MriScanner and Notification classes
    # -----------------------------------------------------------------------------------
    config_obj = Config(db, verbose)
    imaging_obj = Imaging(db, verbose, config_file)
    tarchive_obj = Tarchive(db, verbose, config_file)
    mri_upload_obj = MriUpload(db, verbose)
    mri_scanner_obj = MriScanner(db, verbose)
    notification_obj = Notification(
        db,
        verbose,
        notification_type='python DICOM archive validation',
        notification_origin='dicom_archive_validation.py',
        process_id=upload_id)

    # ---------------------------------------------------------------------------------------------
    # grep config settings from the Config module & ensure that there is a final / in dicom_lib_dir
    # ---------------------------------------------------------------------------------------------
    dicom_lib_dir = config_obj.get_config('tarchiveLibraryDir')
    dicom_lib_dir = dicom_lib_dir if dicom_lib_dir.endswith(
        '/') else dicom_lib_dir + "/"

    # ----------------------------------------------------
    # determine the archive location
    # ----------------------------------------------------
    archive_location = tarchive_path.replace(dicom_lib_dir, '')

    # -------------------------------------------------------------------------------
    # update the mri_upload table to indicate that a script is running on the upload
    # -------------------------------------------------------------------------------
    mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                     fields=('Inserting', ),
                                     values=('1', ))

    # ---------------------------------------------------------------------------------
    # create the DICOM archive array (that will be in tarchive_obj.tarchive_info_dict)
    # ---------------------------------------------------------------------------------
    success = tarchive_obj.create_tarchive_dict(archive_location, None)
    if not success:
        message = 'ERROR: Only archive data can be uploaded. This seems not to be a valid' \
                  ' archive for this study!'
        notification_obj.write_to_notification_spool(message=message,
                                                     is_error='Y',
                                                     is_verbose='N')
        print('\n' + message + '\n\n')
        mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                         fields=('isTarchiveValidated',
                                                 'Inserting',
                                                 'IsCandidateInfoValidated'),
                                         values=('0', '0', '0'))
        sys.exit(lib.exitcode.INVALID_DICOM)
    else:
        tarchive_id = tarchive_obj.tarchive_info_dict['TarchiveID']
        mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                         fields=('TarchiveID', ),
                                         values=(tarchive_id, ))
    tarchive_info_dict = tarchive_obj.tarchive_info_dict

    # ------------------------------------------------------------------------------
    # verify the md5sum of the DICOM archive against the one stored in the database
    # ------------------------------------------------------------------------------
    message = '==> verifying DICOM archive md5sum (checksum)'
    notification_obj.write_to_notification_spool(message=message,
                                                 is_error='N',
                                                 is_verbose='Y')
    if verbose:
        print('\n' + message + '\n')
    result = tarchive_obj.validate_dicom_archive_md5sum(tarchive_path)
    message = result['message']
    if result['success']:
        notification_obj.write_to_notification_spool(message=message,
                                                     is_error='N',
                                                     is_verbose='Y')
        if verbose:
            print('\n' + message + '\n')
    else:
        notification_obj.write_to_notification_spool(message=message,
                                                     is_error='Y',
                                                     is_verbose='N')
        print('\n' + message + '\n\n')
        mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                         fields=('isTarchiveValidated',
                                                 'Inserting',
                                                 'IsCandidateInfoValidated'),
                                         values=('0', '0', '0'))
        sys.exit(lib.exitcode.CORRUPTED_FILE)

    # ----------------------------------------------------
    # verify PSC information stored in DICOMs
    # ----------------------------------------------------
    site_dict = imaging_obj.determine_study_center(tarchive_info_dict)
    if 'error' in site_dict.keys():
        message = site_dict['message']
        notification_obj.write_to_notification_spool(message=message,
                                                     is_error='Y',
                                                     is_verbose='N')
        print('\n' + message + '\n\n')
        mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                         fields=('isTarchiveValidated',
                                                 'Inserting',
                                                 'IsCandidateInfoValidated'),
                                         values=('0', '0', '0'))
        sys.exit(site_dict['exit_code'])
    center_id = site_dict['CenterID']
    center_name = site_dict['CenterName']
    message = '==> Found Center Name: ' + center_name + ', Center ID: ' + str(
        center_id)
    notification_obj.write_to_notification_spool(message=message,
                                                 is_error='N',
                                                 is_verbose='Y')
    if verbose:
        print('\n' + message + '\n')

    # ---------------------------------------------------------------
    # grep scanner information based on what is in the DICOM headers
    # ---------------------------------------------------------------
    scanner_dict = mri_scanner_obj.determine_scanner_information(
        tarchive_info_dict, site_dict)
    message = '===> Found Scanner ID: ' + str(scanner_dict['ScannerID'])
    notification_obj.write_to_notification_spool(message=message,
                                                 is_error='N',
                                                 is_verbose='Y')
    if verbose:
        print('\n' + message + '\n')

    # ---------------------------------------------------------------------------------
    # determine subject IDs based on DICOM headers and validate the IDs against the DB
    # ---------------------------------------------------------------------------------
    subject_id_dict = imaging_obj.determine_subject_ids(
        tarchive_info_dict, scanner_dict['ScannerID'])
    is_subject_info_valid = imaging_obj.validate_subject_ids(subject_id_dict)
    if not is_subject_info_valid:
        # note: the script will not exit so that further down it can be inserted per
        # NIfTI file into MRICandidateErrors
        message = subject_id_dict['CandMismatchError']
        notification_obj.write_to_notification_spool(message=message,
                                                     is_error='Y',
                                                     is_verbose='N')
        print(message)
        mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                         fields=('IsCandidateInfoValidated', ),
                                         values=('0', ))
    else:
        message = subject_id_dict['message']
        notification_obj.write_to_notification_spool(message=message,
                                                     is_error='N',
                                                     is_verbose='Y')
        mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                         fields=('IsCandidateInfoValidated', ),
                                         values=('1', ))

    # -----------------------------------------------------
    # update mri_upload table with IsTarchiveValidated = 1
    # -----------------------------------------------------
    mri_upload_obj.update_mri_upload(upload_id=upload_id,
                                     fields=('isTarchiveValidated',
                                             'Inserting'),
                                     values=('1', '0'))
Example #15
class RunTestCase(unittest.TestCase):
    def setUp(self):
        parentpath = (
            os.path.abspath(
                os.path.join(
                    os.path.dirname(os.path.realpath(__file__)),
                    os.pardir
                )
            )
        )
        manifestpath = os.path.join(parentpath, 'manifest.json')

        configpath = os.path.join(parentpath, 'config.json')
        rawsettings = None
        with open(configpath, 'r') as file_:
            rawsettings = json.load(file_)
        self.database = Database(rawsettings['options']['datasource'])

        rawmanifest = None
        with open(manifestpath, 'r') as file_:
            rawmanifest = json.load(file_)
        self.attributes = Attributes(rawmanifest['attributes'], self.database)

        self.threshold = rawsettings['options']['threshold']
        self.processes = 2

    def test_init(self):
        with tempfile.TemporaryDirectory() as directory:
            try:
                # Act
                run = Run(
                    directory, self.attributes, self.database, self.threshold,
                    self.processes
                )

                # Assert
                self.assertIsNotNone(run.run_id)
            finally:
                self.database.post(
                    'DELETE FROM reaper_runs WHERE id = {0}'.format(run.run_id)
                )
                self.database.disconnect()

    def test_save(self):
        with tempfile.TemporaryDirectory() as directory:
            # Arrange
            rresults = {
                'architecture': 9.9, 'continuous_integration': True,
                'community': 9, 'documentation': 9.9, 'history': 9.9,
                'license': True, 'management': 9.9, 'unit_test': 9.9,
                'state': 'active'
            }
            run = Run(
                directory, self.attributes, self.database, self.threshold,
                self.processes
            )

            # Act
            run._save(10868464, 99.99, rresults)

            # Assert
            try:
                self.database.connect()
                actual = self.database.get(
                    '''
                        SELECT project_id, architecture,
                            continuous_integration, community, documentation,
                            history, license, management, unit_test, state,
                            score
                        FROM reaper_results WHERE run_id = {0}
                    '''.format(run.run_id)
                )
                self.assertEqual(10868464, actual[0])
                self.assertEqual(9.9, actual[1])
                self.assertEqual(True, actual[2])
                self.assertEqual(9, actual[3])
                self.assertEqual(9.9, actual[4])
                self.assertEqual(9.9, actual[5])
                self.assertEqual(True, actual[6])
                self.assertEqual(9.9, actual[7])
                self.assertEqual(9.9, actual[8])
                self.assertEqual('active', actual[9])
                self.assertEqual(99.989998, actual[10])
            finally:
                self.database.post(
                    'DELETE FROM reaper_runs WHERE id = {0}'.format(run.run_id)
                )
                self.database.disconnect()
Example #16
from lib.database import Database as myDb

conn = myDb.connect()

cursor = conn.cursor()
cursor.execute('SELECT top 5 * from dbo.FHL_USR_MTRX')

for row in cursor:
    print(row)
Example #17
File: core.py Project: j14ncn/submon
def _run(domains_dic):
    database = Database(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'submon.db'))
    database.connect()
    database.init()
    now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    filename = 'SubMon_subdomain_check_' + time.strftime("%Y%m%d_%H%M%S", time.localtime()) + '.xlsx'
    path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data")
    if not os.path.exists(path):
        os.makedirs(path)
    for key in domains_dic.keys():
        domains = list(set(domains_dic[key]))
        if len(domains) > 0:
            logger.sysinfo("Scanning %d domains at %s." % (len(domains), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            for domain in domains:
                logger.sysinfo("Scanning domain %s." % domain)
                _engines = [_(domain) for _ in engines.values()]
                loop = asyncio.get_event_loop()
                if debug:
                    loop.set_debug(True)
                for task in [asyncio.ensure_future(_engine.run()) for _engine in _engines ]:
                    loop.run_until_complete(task)
                # loop.close()
                ret = set()
                for _engine in _engines:
                    logger.sysinfo("{engine} Found {num} sites".format(engine=_engine.engine_name,
                                                                       num=len(_engine.results['subdomain'])))
                    ret.update(_engine.results['subdomain'])


                logger.sysinfo("Found %d subdomains of %s." % (len(ret),domain))
                for subdomain in ret:
                    database.insert_subdomain(subdomain,None,None,0,0,now_time,domain)

                logger.sysinfo('Checking %d subdomains of %s.' % (len(ret),domain))
                curl = Curl()
                curl.load_targets(ret)
                for subdomain,url,title,status,content_length in curl.run():
                    database.update_subdomain_status(subdomain,url,title,status,content_length,now_time)
                logger.sysinfo("Checked subdomains' status of %s." % domain)

            datas = []
            for domain in domains:
                for _row in database.select_mondomain(domain):
                    data = {
                        "subdomain": _row[0],
                        "url": _row[1],
                        "title": _row[2],
                        "status": _row[3],
                        "len": _row[4],
                        "update_time" : _row[5],
                        "domain": _row[6]
                    }
                    datas.append(data)
            tocsv(datas, path, filename, key)
            logger.sysinfo("Finished scanning %d domains at %s." % (len(domains), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        else:
            logger.error("Loading %d domains." % (len(domains)))
    send_smtp(path, filename)
    database.disconnect()
    print()
    print()
Example #18
def make_pic(file_id, config_file, verbose):
    """
    Call the function create_imaging_pic of the Imaging class on
    the FileID provided as argument to this function.

    :param file_id    : FileID of the file for which to create the pic
     :type file_id    : int
    :param config_file: path to the config file with database connection information
     :type config_file: str
    :param verbose    : flag for more printing if set
     :type verbose    : bool
    """

    # database connection
    db = Database(config_file.mysql, verbose)
    db.connect()

    # grep config settings from the Config module
    data_dir = db.get_config('dataDirBasepath')

    # making sure that there is a final / in data_dir
    data_dir = data_dir if data_dir.endswith('/') else data_dir + "/"

    # load the Imaging object
    imaging = Imaging(db, verbose)

    # grep the NIfTI file path
    nii_file_path = imaging.grep_file_path_from_file_id(file_id)
    if not nii_file_path:
        print('WARNING: no file in the database with FileID = ' + str(file_id))
        return
    if not re.search(r'\.nii\.gz$', nii_file_path):
        print('WARNING: wrong file type. File ' + nii_file_path +
              ' is not a .nii.gz file')
        return
    if not os.path.exists(data_dir + nii_file_path):
        print('WARNING: file ' + nii_file_path +
              ' not found on the filesystem')
        return

    # grep the time length from the NIfTI file header
    is_4d_dataset = False
    file_parameters = {}
    length_parameters = imaging.get_nifti_image_length_parameters(
        data_dir + nii_file_path)
    if len(length_parameters) == 4:
        file_parameters['time'] = length_parameters[3]
        is_4d_dataset = True

    # grep the CandID of the file
    cand_id = imaging.grep_cand_id_from_file_id(file_id)
    if not cand_id:
        print('WARNING: CandID not found for FileID ' + str(file_id))

    # create the pic
    pic_rel_path = imaging.create_imaging_pic({
        'cand_id': cand_id,
        'data_dir_path': data_dir,
        'file_rel_path': nii_file_path,
        'is_4D_dataset': is_4d_dataset,
        'file_id': file_id
    })
    if not os.path.exists(data_dir + 'pic/' + pic_rel_path):
        print('WARNING: the pic ' + data_dir + 'pic/' + pic_rel_path +
              ' was not created')
        return

    # insert the relative path to the pic in the parameter_file table
    imaging.insert_parameter_file(file_id, 'check_pic_filename', pic_rel_path)