Example #1
 def get_api_data(self):
     """
     Sends an API request to IGDB.
     If the response contains exactly `limit` records, requests the next page
     until the complete list is retrieved or the offset cap is reached.
     """
     try:
         response = requests.request("GET",
                                     self.url,
                                     headers=self.headers,
                                     data=self.payload)
         response_list = yaml.safe_load(response.text)
         while len(yaml.safe_load(response.text)) == self.limit and (
                 self.offset + self.limit) <= 150:
             self.offset += self.limit
             self.payload = "{} limit {}; offset {};".format(
                 self.search, self.limit, self.offset)
             response = requests.request("GET",
                                         self.url,
                                         headers=self.headers,
                                         data=self.payload)
             response_list += yaml.load(response.text)
     except Exception as e:
         logging.Exception("Exception with payload: " + self.payload)
         logging.Exception(str(e))
         raise
     return response_list
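For context, get_api_data reads several attributes from self that this snippet does not show. The sketch below initializes that state with hypothetical values (the endpoint, headers and query string are assumptions, not the original configuration):

import requests
import yaml
import logging

class IGDBClient:
    def __init__(self, search, api_key):
        # Hypothetical endpoint and auth header; adjust to the IGDB API version in use.
        self.url = "https://api-v3.igdb.com/games"
        self.headers = {"user-key": api_key, "Accept": "application/json"}
        self.search = search          # apicalypse query body, e.g. "fields name;"
        self.limit = 50               # page size compared against each response
        self.offset = 0               # advanced by get_api_data while paging
        self.payload = "{} limit {}; offset {};".format(
            self.search, self.limit, self.offset)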
Example #2
def init_conn(db):
    con = None

    try:
        con = sqlite3.connect(db)

    except sqlite3.Error as e:
        logging.Exception("Database error: %s" % e)
    except Exception as e:
        logging.Exception("Exception in _query: %s" % e)
    return con
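A brief usage sketch, assuming a local SQLite file (the path is a placeholder):

import logging
import sqlite3

con = init_conn("example.db")  # placeholder database path
if con is not None:
    try:
        con.execute("SELECT 1")   # connection is usable
    finally:
        con.close()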
Example #3
 def connect(self):
     try:
         print(self.client[str(self.db_name)])
     except Exception as ex:
         logging.exception(ex)
         return False
     return True
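A sketch of the state connect() assumes, using pymongo-style names; that self.client is a MongoClient, and the host/port values, are assumptions:

import logging
from pymongo import MongoClient

class Db:
    def __init__(self, db_name):
        # Assumed client; anything supporting client[db_name] lookup would work here.
        self.client = MongoClient("localhost", 27017)
        self.db_name = db_name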
Example #4
def mc_clean():
    try:
        mc = memcache.Client(["localhost:11211"], debug=1)
        return mc.flush_all()
    except Exception as m:
        log.exception(m)
        raise SystemExit(1)
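mc_clean relies on a memcache module and a log object defined elsewhere; a minimal sketch of those assumed names (python-memcached is one library exposing this Client API):

import logging
import memcache  # python-memcached

log = logging.getLogger(__name__)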
Example #5
 def on_stop_recording(self, task):
     """Notification that we are done with recording"""
     import psutil
     if self.cpu_start is not None:
         cpu_end = psutil.cpu_times()
         cpu_busy = (cpu_end.user - self.cpu_start.user) + \
             (cpu_end.system - self.cpu_start.system)
         cpu_total = cpu_busy + (cpu_end.idle - self.cpu_start.idle)
         cpu_pct = cpu_busy * 100.0 / cpu_total
         task['page_data']['fullyLoadedCPUms'] = int(cpu_busy * 1000.0)
         task['page_data']['fullyLoadedCPUpct'] = cpu_pct
         self.cpu_start = None
     self.recording = False
     if self.thread is not None:
         self.thread.join(10)
         self.thread = None
     # record the CPU/Bandwidth/memory info
     if self.usage_queue is not None and not self.usage_queue.empty(
     ) and task is not None:
         file_path = os.path.join(task['dir'],
                                  task['prefix']) + '_progress.csv.gz'
         gzfile = gzip.open(file_path, GZIP_TEXT, 7)
         if gzfile:
             gzfile.write(
                 "Offset Time (ms),Bandwidth In (bps),CPU Utilization (%),Memory\n"
             )
             try:
                 while True:
                     snapshot = self.usage_queue.get(5)
                     if snapshot is None:
                         break
                     gzfile.write('{0:d},{1:d},{2:0.2f},-1\n'.format(
                         snapshot['time'], snapshot['bw'], snapshot['cpu']))
             except Exception:
                 logging.Exception("Error processing usage queue")
             gzfile.close()
     if self.tcpdump is not None:
         logging.debug('Waiting for tcpdump to stop')
         from .os_util import wait_for_all
         if platform.system() == 'Windows':
             wait_for_all('WinDump')
         else:
             wait_for_all('tcpdump')
         self.tcpdump = None
     if self.ffmpeg is not None:
         logging.debug('Waiting for video capture to finish')
         if platform.system() == 'Windows':
             self.ffmpeg.communicate(input='q'.encode('utf-8'))
         else:
             self.ffmpeg.communicate(input='q')
         self.ffmpeg = None
     if platform.system() == 'Windows':
         from .os_util import kill_all
         kill_all('ffmpeg.exe', True)
     else:
         subprocess.call(['killall', '-9', 'ffmpeg'])
     self.job['shaper'].reset()
Example #6
def _delete_local(local_base, files, dryrun):
    for name, mtime, size in files:
        logging.info("rem_: %s/%s" % (local_base, name))
        if dryrun:
            continue
        local_path = os.path.join(local_base, name)
        try:
            os.remove(local_path)
        except Exception as e:
            logging.Exception("unable to delete: %s" % local_path)
Example #7
 def insert_tag(self, tag_name, is_company=0):
     try:
         c = self.conn.cursor()
         c.execute('''INSERT OR IGNORE INTO cup_tag VALUES(?,?)''',
                   (tag_name, is_company))
         # self.conn.commit()
     except Exception:
         logging.exception('Failed to insert tag: ' + str(sys.exc_info()))
         raise
     finally:
         c.close()
Example #8
 def insert_question_with_tag(self, question_id, tag_name):
     try:
         c = self.conn.cursor()
         c.execute(
             '''INSERT OR IGNORE INTO cup_question_with_tag VALUES(?, ?)''',
             (question_id, tag_name))
         # self.conn.commit()
     except Exception:
         logging.exception('Failed to insert question_with_tag: ' +
                           str(sys.exc_info()))
         raise
     finally:
         c.close()
Example #9
def extract_rois(args):
    # Python 3 does not allow tuple unpacking in function signatures,
    # so unpack the (full_path_to_file, individualframename) pair here.
    full_path_to_file, individualframename = args
    # Create SIMA Sequence & ImagingDataset objects from image file(s) or
    # motion correction if action == 'extract', assume that motion
    # correction was done on EC2 previously
    if action == 'both':
        try:
            motion_correction((full_path_to_file, individualframename))
        except Exception as e:
            print('Motion correction failed')
            print(e)
            logging.exception('Motion correction failed')

    filename = os.path.splitext(os.path.basename(full_path_to_file))[0]
    dataset = sima.ImagingDataset.load(filename + '_mc.sima')

    # Obtain ROIs
    if to_segment:
        logging.info("Segmenting images for %s..." % filename)

        # Automated segmentation
        # Define segmentation method and post processing.
        segment_approach = sima.segment.PlaneNormalizedCuts()
        segment_approach.append(sima.segment.SparseROIsFromMasks())
        segment_approach.append(sima.segment.SmoothROIBoundaries())
        segment_approach.append(sima.segment.MergeOverlapping(threshold=0.5))

        # Apply segmentation to dataset
        rois = dataset.segment(segment_approach)

        logging.info("Done segmenting images for %s" % filename)

        print("Done segmenting images for %s" % filename)
    else:
        logging.info("Importing ROIs from ImageJ for %s..." % filename)
        print("Importing ROIs from ImageJ for %s..." % filename)

        # Load ROIs from ImageJ
        rois = ROIList.load(filename + '_mc_' + roi_filename, fmt='ImageJ')
        dataset.add_ROIs(rois, 'from_ImageJ')

        logging.info("Done importing ROIs for %s" % filename)
        print("Done importing ROIs for %s" % filename)

    # Extract signals from ROIs into numpy file
    signals = dataset.extract(rois)
    extracted_signals = np.asarray(signals['raw'])
    np.save(filename + '_extractedsignals', extracted_signals)

    logging.info("Done extracting signals")
    print("Done extracting signals")
Example #10
def test():
    clf3 = joblib.load("save/clf.pkl")
    test_images = get_images("test_images/")
    flat_data = []  # must be initialized before the loop appends to it
    for each in test_images:
        img = skimage.io.imread(each)
        img = get_image_feature(img)
        img_resized = resize(img,
                             dimension,
                             anti_aliasing=True,
                             mode='reflect')
        flat_data.append(img_resized.flatten())
        try:
            # predict() expects a 2D array, hence reshaping the single sample
            print(clf3.predict(img_resized.flatten().reshape(1, -1)))
        except Exception as e:
            import logging
            logging.exception(e)
    flat_data = np.array(flat_data)
    print(clf3.predict(flat_data[0:5]))
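test() likewise depends on names defined elsewhere in the module; the definitions below are placeholders that make the snippet self-contained, not the original implementations:

import glob
import joblib
import numpy as np
import skimage.io
from skimage.transform import resize

dimension = (64, 64)  # placeholder target size passed to resize()

def get_images(folder):
    # Placeholder: collect test image paths from the folder.
    return glob.glob(folder + "*.png")

def get_image_feature(img):
    # Placeholder: the original feature-extraction step is not shown.
    return img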
Example #11
    def insert_question(self, question):
        last_id = -1
        try:
            c = self.conn.cursor()
            c.execute(
                '''INSERT OR IGNORE INTO cup_question VALUES(NULL, ?, ?, ?, ?, ?, ?, datetime('now'), 0 )''',
                (question.question_id, question.question_content,
                 question.link, question.up_votes, question.comment_count,
                 question.creation_date))
            # self.conn.commit()
            last_id = c.lastrowid
        except Exception:
            logging.exception('Failed to insert question: ' +
                              str(sys.exc_info()))
            raise
        finally:
            c.close()

        return last_id
Example #12
def parse_pe_pkl(file_index, file_id, fjson, unprocessed):
    """ Function to parse pickle file to find the boundaries of PE sections in a sample's pickle representation
    Args:
        file_index: PE sample index
        file_id: PE name
        fjson: pickle data representation of PE sample
        unprocessed: keeps track of count of samples not processed properly
    Returns:
         section_bounds: PE section boundaries
         unprocessed: keeps track of count of samples not processed properly
         file_byte_size: size of full sample
    """
    section_bounds = []
    file_byte_size = None
    max_section_end_offset = 0
    try:
        # file_byte_size = fjson['size_byte']
        with open(cnst.RAW_SAMPLE_DIR + file_id, 'rb') as f:
            file_byte_size = len(list(f.read()))
        pe = pefile.PE(cnst.RAW_SAMPLE_DIR + file_id)
        for pkl_section in pe.sections:
            section_bounds.append(
                (pkl_section.Name.strip(b'\x00').decode("utf-8").strip(),
                 pkl_section.PointerToRawData,
                 pkl_section.PointerToRawData + pkl_section.SizeOfRawData))
            if (pkl_section.PointerToRawData +
                    pkl_section.SizeOfRawData) > max_section_end_offset:
                max_section_end_offset = (pkl_section.PointerToRawData +
                                          pkl_section.SizeOfRawData)

        # Placeholder section "padding" - for activations in padding region
        # if max_section_end_offset < fjson["size_byte"]:
        #    section_bounds.append((cnst.TAIL, max_section_end_offset + 1, fjson["size_byte"]))
        # section_bounds.append((cnst.PADDING, fjson["size_byte"] + 1, cnst.MAX_FILE_SIZE_LIMIT))
    except Exception as e:
        logging.Exception("parse failed . . . [FILE INDEX - " +
                          str(file_index) + "]  [" + str(file_id) + "] ")
        unprocessed += 1
    return section_bounds, unprocessed, file_byte_size
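A hypothetical call; cnst.RAW_SAMPLE_DIR is assumed to point at the directory holding the raw PE samples, and since the active code never reads fjson, None is passed for it here:

unprocessed = 0
bounds, unprocessed, size = parse_pe_pkl(0, "sample.exe", None, unprocessed)  # placeholder file name
for name, start, end in bounds:
    print(name, start, end)   # section name and its raw-data byte range
print(unprocessed, size)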
Example #13
 def on_stop_recording(self, task):
     """Notification that we are done with an operation that needs to be recorded"""
     if self.thread is not None:
         self.thread.join(10)
         self.thread = None
     # record the CPU/Bandwidth/memory info
     self.recording = False
     if self.usage_queue is not None and not self.usage_queue.empty(
     ) and task is not None:
         file_path = os.path.join(task['dir'],
                                  task['prefix']) + '_progress.csv.gz'
         gzfile = gzip.open(file_path, GZIP_TEXT, 7)
         if gzfile:
             logline = "Offset Time (ms),Bandwidth In (bps),CPU Utilization (%),Memory\n"
             logging.debug(logline)
             gzfile.write(logline)
             try:
                 while True:
                     snapshot = self.usage_queue.get(5)
                     if snapshot is None:
                         break
                     logline = '{0:d},{1:d},-1,-1\n'.format(
                         snapshot['time'], snapshot['bw'])
                     logging.debug(logline)
                     gzfile.write(logline)
             except Exception:
                 logging.Exception("Error processing usage queue")
             gzfile.close()
     if self.tcpdump_enabled:
         tcpdump = os.path.join(task['dir'], task['prefix']) + '.cap'
         self.adb.stop_tcpdump(tcpdump)
     if self.video_enabled and not self.job['disable_video']:
         task['video_file'] = os.path.join(task['dir'],
                                           task['prefix']) + '_video.mp4'
         self.adb.stop_screenrecord(task['video_file'])
     self.job['shaper'].reset()
Example #14
    def create_database(self):
        try:
            c = self.conn.cursor()
            c.execute('''CREATE TABLE IF NOT EXISTS cup_question (
                question_id INTEGER PRIMARY KEY ON CONFLICT IGNORE,
                question_identifier TEXT UNIQUE NOT NULL,
                content TEXT NOT NULL,
                link TEXT,
                up_votes INTEGER,
                comment_count INTEGER,
                creation_date DATE,
                creation_time_in_db DATE,
                is_deleted INTEGER
                )''')

            c.execute('''CREATE TABLE IF NOT EXISTS cup_tag (
                name TEXT PRIMARY KEY,
                is_company INTEGER
                )''')

            c.execute('''CREATE TABLE IF NOT EXISTS cup_question_with_tag (
                question_id INTEGER,
                tag_name TEXT,
                PRIMARY KEY (question_id, tag_name)
                )''')

            self.conn.commit()
            logging.info('Database initialized.')

        except Exception:
            logging.exception('Failed to create database: ' +
                              str(sys.exc_info()))
            raise

        finally:
            c.close()
Example #15
        p_icd = df_grouped_icd.loc[df_grouped_icd['dsysrtky'] ==
                                   p_info['dsysrtky']]
        df_p = prepare_data(p_info, p_icd.iloc[0], window_back, window_forward,
                            shift)
        frame.append(df_p)
    df_all = pd.concat(frame, ignore_index=True)
    return df_all


try:
    df_training = combine_patient(tr, df_grouped_icd, window_back,
                                  window_forward, shift)
    df_testing = combine_patient(ts, df_grouped_icd, window_back,
                                 window_forward, shift_test)
except Exception as e:
    logging.Exception("Unexpected exception! %s", e)

print(df_training.shape)
print(df_testing.shape)

save_csv = 1
if save_csv == 1:
    training_file = dir_csv + 'blance' + str(
        balance) + testing_delay_indicate + 'new_df_training' + str(
            window_forward) + '_' + str(shift) + '.csv'
    testing_file = dir_csv + 'blance' + str(
        balance) + testing_delay_indicate + 'new_df_testing' + str(
            window_forward) + '_' + str(shift) + '.csv'
    df_training.to_csv(training_file)
    df_testing.to_csv(testing_file)
    logging.critical('training data is saved to file ' + training_file)