Example #1
def extract_ipgs(filename, count=None, to_ip=None, from_ip=None):
    cache_name = generate_cache_name(filename, '_ipgs', to_ip, from_ip, count)
    if os.path.exists(cache_name):
        print "Hit a cache extracting IPGs!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                return [Decimal(x) for x in (f.readlines()[0].split(','))]
    metadatas = extract_expcap_metadatas(filename,
                                         count=count,
                                         to_ip=to_ip,
                                         from_ip=from_ip)
    ipgs = []
    start = get_first_enabled(metadatas)

    if start == None:
        return []

    last_end = metadatas[start].wire_end_time()

    for expcap_packet in metadatas[start + 1:]:
        if expcap_packet.is_enabled():
            ipgs.append(expcap_packet.wire_end_time() - last_end)
            last_end = expcap_packet.wire_end_time()

    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(','.join([str(ipg) for ipg in ipgs]))

    return ipgs
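
Every example below wraps file access in the same flock.Flock context manager with flock.LOCK_EX (and sometimes flock.LOCK_NB); Examples #7 and #20 use a different, config-driven Flock class with lock()/unlock(). The module itself is not shown here. A minimal sketch of the file-handle flavour, assuming it is a thin wrapper over fcntl.flock, might look like this (illustrative only, not the project's actual implementation):

import fcntl

LOCK_EX = fcntl.LOCK_EX  # exclusive lock
LOCK_NB = fcntl.LOCK_NB  # non-blocking: raise instead of waiting

class Flock(object):
    """Hold an advisory lock on an open file for the duration of a with-block."""

    def __init__(self, handle, flags):
        self.handle = handle
        self.flags = flags

    def __enter__(self):
        # With LOCK_NB set, this raises IOError (BlockingIOError on Python 3)
        # if another process already holds the lock.
        fcntl.flock(self.handle.fileno(), self.flags)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        fcntl.flock(self.handle.fileno(), fcntl.LOCK_UN)
        return False
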
Example #2
def extract_deltas(filename, column=7, to_ip=None, from_ip=None):
    cache_name = generate_cache_name(filename, '_deltas', to_ip, from_ip, None)
    if os.path.exists(cache_name):
        print "Hit a cache extracting deltas!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                return [
                    Decimal(delta) for delta in f.readlines()[0].split(',')
                ]

    times = extract_times(filename, column, to_ip=to_ip, from_ip=from_ip)
    if len(times) == 0:
        return []

    diffs = []
    last_time = times[0]
    for pkt_time in times[1:]:
        diffs.append(pkt_time - last_time)
        last_time = pkt_time

    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(','.join([str(diff) for diff in diffs]))

    return diffs
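
extract_ipgs() and extract_deltas() share the same cache-or-compute shape: return the comma-separated cached values under an exclusive lock if the cache file exists, otherwise compute the values and write them back. A hedged generic sketch of that shape (cached_values, cache_path, compute, and parse are illustrative stand-ins, not helpers from this project):

import os
from decimal import Decimal

import flock  # the same file-lock module used throughout these examples

def cached_values(cache_path, compute, parse=Decimal):
    # Read path: reuse the cached comma-separated values under the lock.
    if os.path.exists(cache_path):
        with open(cache_path) as f:
            with flock.Flock(f, flock.LOCK_EX):
                line = f.readline().strip()
                return [parse(x) for x in line.split(',')] if line else []
    # Miss: compute the values, then persist them for the next caller.
    values = compute()
    with open(cache_path, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX):
            f.write(','.join(str(v) for v in values))
    return values
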
Example #3
def train_model(node, freq):

    rnn = Sequential()
    rnn_layer = GRU(units, input_shape=input_shape)
    rnn.add(rnn_layer)

    rnn.add(Dropout(0.5))
    rnn.add(Dense(1))
    opt = adam(lr=0.001, decay=0.0)

    rnn.compile(loss='mse', optimizer=opt)
    print("COMPILED")
    weights_file = weights_dir_base + node + '/' + freq + '.h5'
    if exists(weights_file):

        with open(weights_file, 'r') as w_file:
            with flock.Flock(w_file, flock.LOCK_EX) as lock:
                rnn.load_weights(weights_file)
        these_epochs = epochs
    else:
        these_epochs = epochs_init

    recent_smooth_lines = subprocess.check_output([
        'tail', '-n' + str(train_size + pred_step),
        smooth_dir_base + node + "/" + str(freq) + ".out"
    ])
    recent_smooth_lines = recent_smooth_lines.decode("utf-8")
    recent_smooths = []
    for val in recent_smooth_lines.split("\n")[:-1]:
        splits = val.split(",")
        if len(splits) == 2:
            recent_smooths.append(float(splits[1]))
    #recent_smooths = [float(val.split(",")[1]) for val in recent_smooth_lines.split("\n")[:-1]]
    input_data = []
    output_data = []
    print(train_size, pred_step, lb)
    for i in range(train_size - lb - pred_step + 1):
        input_data.append(recent_smooths[i:i + lb])
        output_data.append(recent_smooths[i + lb + pred_step])

    input_data = np.reshape(np.array(input_data), (-1, 1, lb))
    output_data = np.reshape(np.array(output_data), (-1, 1))
    print(input_data.shape)

    input_data = normalize(input_data)
    output_data = normalize(output_data)
    rnn.fit(input_data, output_data, epochs=these_epochs)
    out = rnn.predict(input_data)
    print(np.min(output_data), np.max(output_data))

    print(np.min(out), np.max(out))

    if not exists(weights_dir_base + node + '/'):
        makedirs(weights_dir_base + node + '/')
    with open(weights_file, 'w') as w_file:
        with flock.Flock(w_file, flock.LOCK_EX) as lock:
            rnn.save_weights(weights_file)
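
The loop over range(train_size - lb - pred_step + 1) in train_model() builds sliding lookback windows of length lb and pairs each window with the value at index i + lb + pred_step. A toy illustration with assumed values lb = 3 and pred_step = 1 (the range here is based on the toy series length rather than train_size):

series = [10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]
lb, pred_step = 3, 1
inputs, targets = [], []
for i in range(len(series) - lb - pred_step):
    inputs.append(series[i:i + lb])             # lookback window
    targets.append(series[i + lb + pred_step])  # prediction target, indexed as in train_model()
# inputs  -> [[10.0, 11.0, 12.0], [11.0, 12.0, 13.0], [12.0, 13.0, 14.0]]
# targets -> [14.0, 15.0, 16.0]
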
Example #4
def replace_inet_lib(project_name, git_hash):
    directory = INET_LIB_CACHE_DIR + "/" + git_hash

    os.makedirs(directory, exist_ok=True)

    LOGGER.info("replacing inet lib with the one built from commit " +
                git_hash)

    with open(directory + "/download.lock", "wb") as lock_file:
        LOGGER.info(
            "starting download, or waiting for the in-progress download to finish"
        )
        with flock.Flock(lock_file, flock.LOCK_EX):
            LOGGER.info("download lock acquired")
            try:
                with open(directory + "/libINET.so", "xb") as f:

                    LOGGER.info(
                        "we have just created the file, so we need to download it"
                    )
                    client = pymongo.MongoClient(MONGO_HOST)
                    gfs = gridfs.GridFS(client.opp)
                    LOGGER.info("connected, downloading")
                    f.write(gfs.get(git_hash).read())
                    LOGGER.info("download done")

            except FileExistsError:
                LOGGER.info("the file was already downloaded")

    shutil.copy(directory + "/libINET.so", get_project_lib_file(project_name))
    LOGGER.info("file copied to the right place")
Example #5
def extract_times(filename, column=7, count=None, to_ip=None, from_ip=None):
    times = []

    cache_name = generate_cache_name(filename, '_times', to_ip, from_ip, count)
    if os.path.exists(cache_name):
        print "Hit a cache extracting arrival times!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                return [Decimal(x) for x in (f.readlines()[0].split(','))]

    expcap_metadatas = \
        extract_expcap_metadatas(filename, count=count, to_ip=to_ip, from_ip=from_ip)
    start = get_first_enabled(expcap_metadatas)

    for expcap in expcap_metadatas:
        if expcap.is_enabled():
            times.append(expcap.wire_start_time())

    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(','.join([str(time) for time in times]))

    return times
Example #6
def extract_sizes(filename, count=None, to_ip=None, from_ip=None):
    cache_name = generate_cache_name(filename, '_sizes_list', to_ip, from_ip,
                                     count)
    if os.path.exists(cache_name):
        print "Hit a cache extracting packet sizes!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                sizes = f.readlines()[0].split(',')
                return [int(size) for size in sizes]

    metadatas = extract_expcap_metadatas(filename,
                                         count,
                                         to_ip=to_ip,
                                         from_ip=from_ip)
    sizes = []
    for expcap_packet in metadatas:
        if expcap_packet.is_enabled():
            sizes.append(expcap_packet.length)

    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(','.join([str(size) for size in sizes]))
    return sizes
Example #7
def main():
    func_name = sys._getframe().f_code.co_name

    # Load config
    config_ini = configparser.ConfigParser()
    config_ini.read('conf/config.ini', encoding='utf-8')

    os.environ['HTTP_PROXY'] = 'https://proxygate2.nic.nec.co.jp:8080'
    os.environ['HTTPS_PROXY'] = 'https://proxygate2.nic.nec.co.jp:8080'
    os.environ['NLS_LANG'] = 'Japanese_Japan.AL32UTF8'

    # Set logger
    log_ini = configparser.ConfigParser()
    log_ini.read('conf/log.ini', encoding='utf-8')
    log = log_ini['sync_hr']
    logfile = config_ini['logger']['logfile']
    rotation = config_ini['logger']['rotation']
    retention = int(config_ini['logger']['retention'])
    logger.remove()
    logger.add(logfile, rotation=rotation, retention=retention)

    # Set flock object
    flock_obj = flock.Flock(config_ini, log_ini)

    # Get nowtime
    nowtime = datetime.datetime.now()

    # Exclusive lock
    if not flock_obj.lock():
        logger.warning(log['300000'], func_name)
        sys.exit()

    # Synchronized human resources
    hr_obj = hr.Hr(config_ini, log_ini)

    # Get nowtime
    nowtime = datetime.datetime.now()

    # Update employees
    hr_obj.update_employees(nowtime)

    # Update organizations
    hr_obj.update_organizations(nowtime)

    # Unlock
    flock_obj.unlock()

    # Exit main
    sys.exit()
Example #8
def check_msg_queue():
    ''' Check the Huanxin (环信) message queue and send out any pending messages. '''
    with open('/tmp/yym_check_msg_queue.lock', 'w') as f:
        blocking_lock = flock.Flock(f, flock.LOCK_EX | flock.LOCK_NB)

        try:
            with blocking_lock:
                print 'Got lock and checking messages queue:'
                announce_id_groups = db.session.query(
                    Message.announce_id, Message.sender_user_id,
                    Message.content,
                    Message.ext).filter(Message.pushed == False).group_by(
                        Message.announce_id, Message.sender_user_id,
                        Message.content, Message.ext).order_by(
                            Message.announce_id.desc()).all()
                for announce_id_group in announce_id_groups:
                    announce_id, sender_user_id, content, ext = announce_id_group
                    query = db.session.query(Message).filter(
                        Message.pushed == False
                    ).filter(Message.announce_id == announce_id).filter(
                        Message.sender_user_id == sender_user_id).filter(
                            Message.content == content).filter(
                                Message.ext == ext).order_by(Message.id.desc())
                    messages_groups = group(query, 20)
                    for messages_group in messages_groups:
                        messages = messages_group
                        message = messages[0]
                        sender = util.get_info_user(message.sender_user_id)
                        receivers = util.get_info_users(
                            map(lambda message: message.receiver_user_id,
                                messages))
                        if message.announce_id:
                            announce = util.get_info_announce(
                                message.announce_id)
                            msg = u'' if not announce else announce.content
                        else:
                            msg = message.content
                        ext = json.loads(message.ext)
                        resp = send_message(sender, receivers, msg, ext)
                        # Record the delivery status
                        success, result = resp
                        if success:
                            for message in messages:
                                message.pushed = True
                            db.session.commit()
                            print '* Sent messages:', ' '.join(
                                map(lambda x: str(x.id), messages))
        except IOError, e:
            print 'Checking messages queue job has been under processing!'
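
check_msg_queue() (like transfer_actions() in Example #17) acquires the lock with flock.LOCK_EX | flock.LOCK_NB, so a second instance fails immediately with IOError instead of queueing behind the first; the except branch just reports that the job is already running. The bare single-instance guard, as a hedged sketch (run_exclusively and job are illustrative names):

import flock

def run_exclusively(lock_path, job):
    with open(lock_path, 'w') as f:
        try:
            # LOCK_NB: raise IOError at once if another process holds the lock.
            with flock.Flock(f, flock.LOCK_EX | flock.LOCK_NB):
                return job()
        except IOError:
            return None  # another instance is already running
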
Example #9
File: ads1x15.py  Project: sitbon/spider
    def __init__(self, address=0x48, ic=IC_ADS1115, debug=False):
        self.dev = I2CDevice(address, debug=debug)
        self.address = address
        self.debug = debug
        self.lock = flock.Flock('/tmp/ADS1X15.lock')

        # Make sure the IC specified is valid
        if (ic != IC_ADS1015) and (ic != IC_ADS1115):
            if self.debug:
                print "ADS1x15: Invalid IC specfied: 0x%02X" % ic
        else:
            self.ic = ic

        # Set pga value, so that getLastConversionResult() can use it,
        # any function that accepts a pga value must update this.
        self.pga = 6144
Example #10
def record_fixture_fly_towards_the_middle(config_file, fixture_file):
    # Initial positions
    boids = fl.Flock(config_file)
    positions = boids.get_positions().tolist()
    velocities = boids.get_velocities().tolist()

    # Call method
    boids.fly_towards_the_middle()

    # Updated positions
    positions_after = boids.get_positions().tolist()
    velocities_after = boids.get_velocities().tolist()

    positions.extend(velocities)
    positions_after.extend(velocities_after)

    fixture = {"before": positions, "after": positions_after}
    fixture_file_handle = open(fixture_file, 'w')
    fixture_file_handle.write(yaml.dump(fixture))
    fixture_file_handle.close()
Example #11
def submit():
    global name
    used = 0
    name = byText.formatName(name)
    title = titleBoxText.get()
    desc = descriptionBoxText.get()
    introQuestion = introBoxText.get()
    frame.destroy()
    print("**************************************")
    print("Starting Video Creation")
    print("**************************************\n")

    print("Getting Lock File...",end="",flush=True)
    with open(fileLock_file,'w') as fp:
        with flock.Flock(fp,flock.LOCK_EX) as lock:
            print("Lock secured")
            byText.clearFolders()
            byText.makeTitle(introQuestion,introQuestion+"...Be sure to Subscribe!",name)
            byText.makeStatic("Next Comment")
            for index,i in enumerate(posts):
                if i.selected.get()==True and i.parent==0:
                    list_posts = [i]+i.kids
                    imgNumber = 0
                    for post in list_posts:
                        imgNumber = byText.outImgAndSound(post.textList,post.user,used,imgNumber)+1
                    byText.makeFullPostVideo(used,name)
                    used+=1
            byText.makeFullVideo(name)
            vid_file = "../../out/{}.mp4".format(name)
            if len(sys.argv)==1:
                print("Video made but not uploaded. Exiting!")
                driver.quit()
                sys.exit(0)
            ret = upload_video(vid_file,title,desc)
            if ret==0:
                print("Video created!")
            else:
                print("Failed to create video")
            driver.quit()
            sys.exit(0)
Example #12
    def __init__(self, parent=None):
        super(NGLWidget, self).__init__(parent)
        self.cam = Camera()
        self.mouseGlobalTX = Mat4()
        self.width = 1024
        self.height = 720
        self.setWindowTitle('Genetic Boids - Generation: ' + str(0) + 'MiniGen: ' + str(0) + '| ' + str(0) + '/' + str(0))
        self.spinXFace = 0
        self.spinYFace = 0
        self.rotate = False
        self.translate = False
        self.origX = 0
        self.origY = 0
        self.origXPos = 0
        self.origYPos = 0
        self.INCREMENT = 0.01
        self.ZOOM = 0.1
        self.modelPos = Vec3()
        self.flock = flock.Flock()

        self.flock.AddFood(7)
        self.startTimer(updateframes)
        self.start = False
Example #13
win_height = 600
screen = pygame.display.set_mode((win_width, win_height))
fontObj = pygame.font.SysFont("Courier New", 12)
clock = pygame.time.Clock()
done = False
paused = False

# This is a list of circular "obstacles" (pos_vector, rad)
obstacles = []
for i in range(3):
    x = random.randint(0, win_width)
    y = random.randint(0, win_height)
    obstacles.append([math3d.VectorN(x, y), random.randint(50, 150)])

# Create the flock.  Flock-members shouldn't spawn on obstacles (if doing the bonus)
F = flock.Flock((0, 0, win_width, win_height), 20, obstacles)

# The mouse position (or None if the user isn't clicking)
mpos = None

# Game Loop
while not done:
    # Update
    deltaTime = clock.tick() / 1000.0
    if paused:
        deltaTime = 0.0  # Everything remains functional, but we don't move anything...
    F.update(deltaTime, mpos)

    # Input
    event = pygame.event.poll()
    if event.type == pygame.KEYDOWN and event.key == pygame.K_p:
Example #14
            if attempt>30:
                sys.exit(-1)
            driver.execute_script("arguments[0].scrollIntoView();",lastEl)
            print("Searching for more posts...")
            time.sleep(5) #give extra time to load new elements
            oldPosts = set(posts)
        except:
            print("Couldnt scroll last element, exit failure")
            sys.exit(-1)
            break
    print("Pulled all text. Making video now...")
    driver.quit()

    print("Getting Lock File...",end="",flush=True)
    with open(fileLock_file,'w') as fp:
        with flock.Flock(fp,flock.LOCK_EX) as lock:
            print("Lock secured")
            clearFolders()
            makeStatic("Next Post")
            makeThumbnail(subName,episodeNum)
            makeTitle(vid_title,voice_text,name)

            for i in range(len(listOfPosts)):
                    postText = listOfPosts[i][0]
                    user = listOfPosts[i][1]
                    outImgAndSound(postText,user,i)
                    makeFullPostVideo(i,name)
            makeFullVideo(name)
            vid_file = "../../out/{}.mp4".format(name)
            vid_title = "Best of {} Episode {} [The Reddit Experiment]".format(urlKey,episodeNum)
            vid_des = "Reddit posts directly from {}, read aloud for your convenience and pleasure :) ".format(urlKey)
Example #15
def extract_flow_sizes(filename):
    cache_name = generate_cache_name(filename, '_flow_sizes', None, None, None)

    if os.path.exists(cache_name):
        print "Hit a cache extracting flow sizes!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                lines = f.readlines()
                if len(lines) == 0:
                    return []
                data = lines[0]
                lengths = [int(x) for x in data.split(',')]
                return lengths

    # Get all the TCP packets, then look for SYNs and FINs.
    metadatas = extract_expcap_metadatas(filename)

    flows = {}
    flow_count = 0
    flow_sizes = []
    disabled_count = 0
    for packet in metadatas:
        if packet.is_disabled():
            disabled_count += 1
            continue

        if packet.is_ip() and packet.is_tcp() and packet.is_tcp_syn():
            # print "Starting flow for ", packet.packet_data
            identifier = tcp_flow_identifier(packet)
            if PROCESS_CSV_DEBUG:
                print "Flow ID is ", identifier
            # print "Start time is", packet.wire_start_time()
            flows[identifier] = packet.tcp_data_length
            flow_count += 1
        elif packet.is_ip() and packet.is_tcp() and (packet.is_tcp_fin()
                                                     or packet.is_tcp_rst()):
            identifier = tcp_flow_identifier(packet)
            if identifier in flows:
                flow_sizes.append(packet.tcp_data_length + flows[identifier])
                # Remove that flow, we don't want to count
                # everything twice.  We'll only delete it on
                # the first FIN, but that's OK.
                del flows[identifier]
            elif PROCESS_CSV_DEBUG:
                print "Warning! Found a FIN/RST for a flow we didn't see a SYN for!"
        elif packet.is_tcp():
            identifier = tcp_flow_identifier(packet)
            if identifier in flows:
                flows[identifier] += packet.tcp_data_length
            elif PROCESS_CSV_DEBUG:
                print "Saw a TCP packet for a flow we didn't SYN to!"

    if len(flows) > 0:
        print "Warning: Saw ", len(
            flows), " SYNs for flows that weren't closed"

    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(','.join([str(length) for length in flow_sizes]))

    print "Flow count is ", flow_count
    print disabled_count, "packets disabled"
    return flow_sizes
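
extract_flow_sizes() keys a dict by tcp_flow_identifier(): a SYN opens an entry with the packet's TCP payload length, later packets of the same flow add to it, and the first FIN or RST closes the entry and records the total. A toy version over (flag, flow_id, nbytes) tuples, which are an illustration rather than the expcap metadata API:

def flow_sizes(packets):
    flows, sizes = {}, []
    for flag, flow_id, nbytes in packets:
        if flag == 'SYN':
            flows[flow_id] = nbytes                    # open the flow
        elif flag in ('FIN', 'RST') and flow_id in flows:
            sizes.append(flows.pop(flow_id) + nbytes)  # close it and record the total
        elif flow_id in flows:
            flows[flow_id] += nbytes                   # accumulate payload bytes
    return sizes

# flow_sizes([('SYN', 'a', 0), ('DATA', 'a', 100), ('DATA', 'a', 50), ('FIN', 'a', 0)])
# -> [150]
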
Example #16
def extract_flow_lengths(filename):
    cache_name = generate_cache_name(filename, '_flow_lengths', None, None,
                                     None)

    if os.path.exists(cache_name):
        print "Hit a cache extracting flow lengths!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                lines = f.readlines()
                if len(lines) == 0:
                    return []
                data = lines[0]
                if data == '':
                    return []
                lengths = [Decimal(x) for x in data.split(',')]
                return lengths

    # Get all the TCP packets, then look for SYNs and FINs.
    metadatas = extract_expcap_metadatas(filename)

    debug = False
    flows = {}
    deleted_flows = {}
    flow_count = 0
    flow_lengths = []
    for packet in metadatas:
        if packet.is_disabled():
            continue
        if packet.is_ip() and packet.is_tcp() and packet.is_tcp_syn():
            identifier = tcp_flow_identifier(packet)
            flows[identifier] = packet.wire_start_time()
            flow_count += 1
            if debug:
                print "Adding entry for", identifier

        if packet.is_ip() and packet.is_tcp() and (packet.is_tcp_fin()
                                                   or packet.is_tcp_rst()):
            identifier = tcp_flow_identifier(packet)
            if identifier in flows:
                flow_lengths.append(packet.wire_end_time() - flows[identifier])
                # Remove that flow, we don't want to count
                # everything twice.  We'll only delete it on
                # the first FIN, but that's OK.
                del flows[identifier]
                if debug:
                    deleted_flows[identifier] = True
                    print "Removing entry for ", identifier
            elif debug:
                print "Warning! Found a FIN/RST for a flow we didn't see a SYN for!"
                if debug:
                    if identifier in deleted_flows:
                        print "Actually, we did, it's just closed!"

    if len(flows) > 0:
        print "Warning: Saw ", len(
            flows), " SYNs for flows that weren't closed"
    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(','.join([str(length) for length in flow_lengths]))

    print "Flow count is ", flow_count
    return flow_lengths
Example #17
def transfer_actions():
    ''' Process the tasks that merge user actions; only one instance may run at a time, so the same task is not processed twice. '''
    with open('/tmp/yym_task_transfer_actions.lock', 'w') as f:
        blocking_lock = flock.Flock(f, flock.LOCK_EX | flock.LOCK_NB)

        try:
            with blocking_lock:
                print 'Got lock and processing transferring actions tasks:'
                for task in db.session.query(Task).filter(
                        Task.processed == False).filter(
                            Task.type == u'transfer_actions').order_by(
                                Task.id):
                    print '* Processing task', task.id, task.type, task.data, ':'
                    data = json.loads(task.data)
                    from_user_id = data.get('from', None)
                    to_user_id = data.get('to', None)
                    if not from_user_id or not to_user_id:
                        continue
                    from_user = db.session.query(User).filter(
                        User.id == from_user_id).first()
                    to_user = db.session.query(User).filter(
                        User.id == to_user_id).first()
                    if not from_user or not to_user:
                        continue
                    # ToDo: this replacement may leave the same user following an account more than once, etc.; it is unclear whether that affects user experience or the statistics.
                    # Comment.user_id :
                    db.session.query(Comment).filter(
                        Comment.user_id == from_user_id).update(
                            {'user_id': to_user_id})
                    db.session.commit()
                    # Comment.at_list:
                    for comment in db.session.query(Comment).filter(
                            Comment.at_list.ilike(
                                '%{}%'.format(from_user_id))):
                        at_list = util.get_ids_from_str(comment.at_list)
                        at_list_modified = map(
                            lambda x: to_user_id
                            if x == from_user_id else x, at_list)
                        if at_list != at_list_modified:
                            comment.at_list = ' '.join(
                                map(str, at_list_modified))
                            db.session.commit()
                    # Review.user_id :
                    db.session.query(Review).filter(
                        Review.user_id == from_user_id).update(
                            {'user_id': to_user_id})
                    db.session.commit()
                    util.count_reviews([from_user, to_user], [])
                    # Review.at_list:
                    for review in db.session.query(Review).filter(
                            Review.at_list.ilike('%{}%'.format(from_user_id))):
                        at_list = util.get_ids_from_str(review.at_list)
                        at_list_modified = map(
                            lambda x: to_user_id
                            if x == from_user_id else x, at_list)
                        if at_list != at_list_modified:
                            review.at_list = ' '.join(
                                map(str, at_list_modified))
                            db.session.commit()
                    # fans.user_id & fans.fan_id:
                    to_user.fans.extend(from_user.fans)
                    from_user.fans = []
                    to_user.follows.extend(from_user.follows)
                    from_user.follows = []
                    util.count_follow_fans([from_user, to_user],
                                           [from_user, to_user])
                    # favorites.user_id:
                    to_user.favorites.extend(from_user.favorites)
                    from_user.favorites = []
                    util.count_favorites([from_user, to_user], [])
                    # likes.user_id:
                    to_user.likes.extend(from_user.likes)
                    from_user.likes = []
                    util.count_likes([from_user, to_user], [])
                    # share_record.user_id:
                    to_user.share_records.extend(from_user.share_records)
                    from_user.share_records = []
                    util.count_shares([from_user, to_user], [], [], [])
                    db.session.commit()
                    # The message table is not migrated; we assume messages are sent out in time.
                    # The token table is not migrated either, keeping the original login data.
                    task.processed = True
                    db.session.commit()
        except IOError, e:
            print 'Transferring actions tasks have been under processing!'
Example #18
def extract_sizes_by_window(filename,
                            window_size,
                            count=None,
                            to_ip=None,
                            from_ip=None):
    cache_name = generate_cache_name(filename,
                                     '_sizes_by_window',
                                     to_ip,
                                     from_ip,
                                     count,
                                     window_size=window_size)
    if os.path.exists(cache_name):
        print "Hit a cache extracting sizes by window!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                lines = f.readlines()
                windows = windows_list_from_string(lines[0])
                sizes = lines[1].split(',')
                for i in xrange(len(sizes)):
                    if sizes[i] != '':
                        this_sizes = sizes[i].split('_')
                        sizes[i] = [int(x) for x in this_sizes]
                    else:
                        sizes[i] = []
                return windows, sizes
    else:
        # Try to see if there are any multiple window size files
        # that we can build the right one from.
        other_name, other_size = get_caches_we_can_build_from(
            filename, '_sizes_by_window', to_ip, from_ip, count, window_size)
        if other_name != None:
            print "We are able to build the graph using a different cache file!"
            combination_factor = (int(window_size) / other_size)
            print "The combination factor is ", combination_factor
            with open(other_name) as f:
                with flock.Flock(f, flock.LOCK_EX) as lock:
                    bigger_windows = []
                    bigger_sizes = []

                    lines = f.readlines()
                    windows = windows_list_from_string(lines[0])
                    sizes = lines[1].split(',')

                    for base_index in range(0,
                                            len(sizes) / combination_factor,
                                            combination_factor):
                        window_start, _ = windows[base_index]
                        _, window_end = windows[base_index +
                                                combination_factor - 1]
                        this_window_sizes = []
                        for index in range(0, combination_factor):
                            # Reset each iteration so an empty sub-window
                            # contributes nothing instead of reusing (or never
                            # defining) the previous iteration's sizes.
                            sub_window_sizes = []
                            if sizes[base_index + index] != '':
                                split_sizes = sizes[base_index +
                                                    index].split('_')
                                sub_window_sizes = [
                                    int(x) for x in split_sizes
                                ]
                            this_window_sizes += sub_window_sizes
                        bigger_sizes.append(this_window_sizes)
                        bigger_windows.append((window_start, window_end))
            return bigger_windows, bigger_sizes

    (windows, packets) = extract_windows(filename,
                                         window_size,
                                         count=count,
                                         to_ip=to_ip,
                                         from_ip=from_ip)

    sizes = []
    for packet_window in packets:
        window_sizes = []
        for (fraction_in_window, packet) in packet_window:
            window_sizes.append(packet.length)

        sizes.append(window_sizes)

    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(windows_list_to_string(windows))
            f.write(','.join([
                '_'.join([str(x) for x in size_list]) for size_list in sizes
            ]))

    return windows, sizes
Example #19
def find_bursts(filename,
                count=None,
                ipg_threshold=20000,
                packet_threshold=20,
                to_ip=None,
                from_ip=None):
    cache_name = generate_cache_name(filename,
                                     '_bursts',
                                     to_ip,
                                     from_ip,
                                     count,
                                     ipg_threshold=ipg_threshold,
                                     packet_threshold=packet_threshold)
    if os.path.exists(cache_name):
        print "Found a cache!  Going to use it!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                bursts = expcap_metadata.double_list_load_expcaps_from(f)
                return bursts

    metadatas = extract_expcap_metadatas(filename,
                                         count=count,
                                         to_ip=to_ip,
                                         from_ip=from_ip)

    start = get_first_enabled(metadatas)
    if start == None:
        return []

    time = metadatas[start].wire_end_time()
    last_packet = metadatas[start]
    burst_count = 0
    current_burst = []
    bursts = []
    print "IPG Threshold is ", ipg_threshold
    print "Packet threshold is", packet_threshold
    for packet in metadatas[start + 1:]:
        if packet.is_disabled():
            continue
        next_time = packet.wire_start_time()

        if next_time - time < ipg_threshold:
            burst_count += 1
            # A burst starts with the last packet.
            if len(current_burst) == 0:
                current_burst.append(last_packet)
            current_burst.append(packet)
        else:
            burst_count = 0
            if len(current_burst) > packet_threshold:
                bursts.append(current_burst)
                current_burst = []
            current_burst = []

        time = packet.wire_end_time()
        last_packet = packet

    if len(current_burst) > packet_threshold:
        bursts.append(current_burst)

    total_bursts = sum([len(burst) for burst in bursts])
    print "We have a total of ", total_bursts, "packets in bursts"
    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            expcap_metadata.double_list_save_expcaps_to(f, bursts)

    return bursts
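
find_bursts() chains packets into a burst while the gap between consecutive packets stays under ipg_threshold, seeds each burst with the packet before the first small gap, and only keeps bursts longer than packet_threshold. A toy version over plain timestamps (the original compares the next packet's wire start time against the previous packet's wire end time; a single list of times is used here for brevity):

def bursts_from_times(times, ipg_threshold, packet_threshold):
    bursts, current = [], []
    last = times[0]
    for t in times[1:]:
        if t - last < ipg_threshold:
            if not current:
                current.append(last)   # a burst starts with the previous packet
            current.append(t)
        else:
            if len(current) > packet_threshold:
                bursts.append(current)
            current = []
        last = t
    if len(current) > packet_threshold:
        bursts.append(current)
    return bursts

# bursts_from_times([0, 5, 9, 100, 104, 107, 111], ipg_threshold=10, packet_threshold=2)
# -> [[0, 5, 9], [100, 104, 107, 111]]
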
Example #20
#!/usr/bin/env python3
# coding: utf-8
import sys
import time

sys.path.append('../')

import flock
import configparser

conf = configparser.ConfigParser()
conf.read('../conf/config.ini', encoding='utf-8')

log = configparser.ConfigParser()
log.read('../conf/log.ini', encoding='utf-8')

flock_obj = flock.Flock(conf, log)
if not flock_obj.lock():
    sys.exit()
time.sleep(5)
# Unlock
#flock_obj.unlock()
Example #21
def run_model(node, freq):

    #print("Running model")
    rnn = Sequential()
    rnn_layer = GRU(units, input_shape=input_shape)
    rnn.add(rnn_layer)

    rnn.add(Dropout(0.5))
    rnn.add(Dense(1))
    opt = adam(lr=0.001, decay=0.0)

    rnn.compile(loss='mse', optimizer=opt)

    weights_file = weights_dir_base + node + '/' + freq + '.h5'
    if exists(weights_file):
        with open(weights_file, 'r') as w_file:
            with flock.Flock(w_file, flock.LOCK_EX) as lock:
                rnn.load_weights(weights_file)
    else:
        print("No weights yet!")
        exit(0)


    if not exists(predict_dir_base + node + '/'):
        makedirs(predict_dir_base + node + '/')

    predict_file = predict_dir_base + node + '/' + freq + '.out'
    if exists(predict_file):
        predict_latest_time = subprocess.check_output(['tail', '-1', predict_file]).decode('utf-8').split(",")[0]
    else:
        predict_latest_time = None

    smooth_file = smooth_dir_base + node + '/' + freq + '.out'

    if predict_latest_time:
        start_line = subprocess.check_output(
            ['grep', '-n', predict_latest_time, smooth_file]).decode('utf-8')
        start_line = int(start_line.split(":")[0]) - lb + 1
    else:
        start_line = 0



    smooth_lines = subprocess.check_output(['tail', '-n', '+' + str(start_line), smooth_dir_base + node + "/" + str(freq) + ".out"])
    smooth_lines = smooth_lines.decode("utf-8")
    smooth_lines = smooth_lines.split("\n")
    input_smooth_lines = []
    for line in smooth_lines[:-1]:
        splits = line.split(",")
        if len(splits) == 2:
            try:
                input_smooth_lines.append(float(splits[1]))
            except:
                pass


    #input_smooth_lines = [float(val.split(",")[1]) for val in smooth_lines[:-1]]
    print("smooth lines len", len(input_smooth_lines))
    timestamps = [val.split(",")[0] for val in smooth_lines[lb - 1:-1]]


    #recent_smooths = [float(val.split(",")[1]) for val in recent_smooth_lines.split("\n")[:-1]]
    input_data = []
    #output_data = []
    for i in range(len(input_smooth_lines) - lb + 1):
        input_data.append(input_smooth_lines[i:i+lb])
        #output_data.append(recent_smooths[i+lb+predict_step])

    input_data = np.reshape(np.array(input_data), (-1,1, lb))
    input_data = normalize(input_data)
    #output_data = np.reshape(np.array(output_data), (-1,1))

    results = rnn.predict(input_data)
    results = denormalize(results)
    print(results)
    results = np.reshape(results, (-1))
    print(results.shape[0])
    print(len(timestamps))
    #assert results.shape[0] == len(timestamps)
    with open(predict_file, 'a+') as f:

        for i in range(results.shape[0]):
            f.write(timestamps[i] + "," + str(results[i]) + '\n')
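
run_model() resumes from the last predicted timestamp by shelling out to grep -n to find its line in the smoothed file, then starting tail -n +N from a line lb - 1 rows earlier so the first new window ends at the last predicted point. Roughly the same offset can be computed in pure Python (a hedged alternative, not the project's code; it assumes the timestamp is the first comma-separated field):

def resume_offset(smooth_file, latest_timestamp, lb):
    # 1-based line number to start reading from, mirroring
    # int(grep_output.split(":")[0]) - lb + 1 in run_model().
    with open(smooth_file) as f:
        for line_no, line in enumerate(f, start=1):
            if line.split(",")[0] == latest_timestamp:
                return max(line_no - lb + 1, 1)
    return 1
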
Example #22
def extract_utilizations(filename,
                         window_size,
                         count=None,
                         to_ip=None,
                         from_ip=None):
    cache_name = generate_cache_name(filename,
                                     '_bandwidths',
                                     to_ip,
                                     from_ip,
                                     count,
                                     window_size=str(window_size))
    if os.path.exists(cache_name):
        print "Hit a cache extracting utilizations!"
        with open(cache_name) as f:
            with flock.Flock(f, flock.LOCK_EX) as lock:
                lines = f.readlines()
                windows = windows_list_from_string(lines[0])
                usages = [Decimal(x) for x in (lines[1].split(','))]

                return (windows, usages)
    else:
        # Try to see if there are any multiple window size files
        # that we can build the right one from.
        other_name, other_size = get_caches_we_can_build_from(
            filename, '_bandwidths', to_ip, from_ip, count, window_size)
        if other_name != None:
            print "We are able to build the graph using a different cache file!"
            combination_factor = (int(window_size) / other_size)
            print "The combination factor is ", combination_factor
            with open(other_name) as f:
                with flock.Flock(f, flock.LOCK_EX) as lock:
                    bigger_windows = []
                    bigger_utilizations = []

                    lines = f.readlines()
                    windows = windows_list_from_string(lines[0])
                    utilizations = [Decimal(x) for x in lines[1].split(',')]

                    for base_index in range(0,
                                            len(windows) - combination_factor,
                                            combination_factor):
                        window_start, _ = windows[base_index]
                        _, window_end = windows[base_index +
                                                combination_factor - 1]

                        # Now, go through and build up the appropriate windows:
                        new_utilizations = []
                        for index in range(base_index,
                                           base_index + combination_factor):
                            # We want to weight the contribution to the
                            # utilization in the new window by the length of
                            # the window.
                            sub_window_start, sub_window_end = windows[index]
                            new_fraction = (sub_window_end - sub_window_start
                                            ) / (window_end - window_start)
                            new_utilizations.append(utilizations[index] *
                                                    new_fraction)

                        bigger_utilizations.append(sum(new_utilizations))
                        bigger_windows.append((window_start, window_end))
            # Before we return, write the new utilizations out to disk.
            save_utilizations_in_cache(cache_name, bigger_windows,
                                       bigger_utilizations)
            return bigger_windows, bigger_utilizations

    (windows, packets) = extract_windows(filename,
                                         window_size,
                                         count=count,
                                         to_ip=to_ip,
                                         from_ip=from_ip)

    usages = []
    debug = False
    # For each window, go through and sum the total
    # fraction of time the window is in use.
    for i in xrange(len(windows)):
        (window_start, window_end) = windows[i]
        total_window_time = window_end - window_start
        time_used = Decimal(0.0)
        for (fraction, packet) in packets[i]:
            if debug:
                print fraction
                print packet.wire_length_time()
                print hash(packet)
            time_used += fraction * packet.wire_length_time()

        utilization = time_used / total_window_time
        usages.append(utilization)
        if debug:
            print "Window size is ", total_window_time
            print "Utilization is ", utilization
            print "Number of packets in the window is ", len(packets[i])
        if utilization > 1:
            print "Greater than one usage: see last ", len(
                packets[i]), "utilizations"

    save_utilizations_in_cache(cache_name, windows, usages)
    return (windows, usages)
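
When extract_utilizations() rebuilds a larger window from an existing smaller-window cache, each sub-window contributes its utilization weighted by its share of the combined window's duration. A worked example with two assumed equal 5-second sub-windows at 40% and 80% utilization:

sub_windows = [(0, 5), (5, 10)]   # (start, end) in seconds
utilizations = [0.4, 0.8]
start, end = sub_windows[0][0], sub_windows[-1][1]
combined = sum(u * (s_end - s_start) / (end - start)
               for (s_start, s_end), u in zip(sub_windows, utilizations))
# combined == 0.4 * 0.5 + 0.8 * 0.5 == 0.6, i.e. 60% over the merged 10 s window
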
Example #23
def imshow(img,
           coords=None,
           title='Image',
           wait=True,
           destroy=True,
           save=False,
           normalize=False):
    global imshow_counter

    img = img.copy().astype(np.float32)

    def fill_region(dst, y, x, h, w, v):
        h2 = h // 2
        w2 = w // 2
        py = y - h2
        px = x - w2
        y_min = max(0, py)
        y_max = min(dst.shape[0], y + h2)
        x_min = max(0, px)
        x_max = min(dst.shape[1], x + w2)
        if y_max - y_min <= 0 or x_max - x_min <= 0:
            return

        dst[y_min:y_max, x_min:x_max] = v

    if normalize:
        img -= np.min(img)
        m = np.max(img)
        if m != 0.:
            img /= m

    if save:
        if np.all(img <= 1.0):
            img *= 255.

    if coords is not None:
        img = np.copy(img)
        if isinstance(coords, pd.DataFrame):
            for coord in coords.itertuples():
                fill_region(img, int(round(coord.row)) - 2, int(round(coord.col)) - 2, \
                            5, 5, np.asarray(cld.cls_colors[coord.cls]))
        else:
            for coord in coords:
                fill_region(img, int(round(coord[1])) - 2, int(round(coord[2])) - 2, \
                            5, 5, np.asarray(cld.cls_colors[coord[0]]))

    if save:
        if len(img.shape) == 2:
            img = img[:, :, None]
        lockfile = os.path.join(debug_dir, '.lock')
        with open(lockfile, 'w') as fp:
            with flock.Flock(fp, flock.LOCK_EX) as lock:
                curr_num = len(os.listdir(debug_dir))
                filename = os.path.join(debug_dir,
                                        'imshow_%s_%i.png' % (title, curr_num))
                cv2.imwrite(filename, img[..., ::-1])

        return

    plt.title(title)
    plt.imshow(img)
    plt.show()

    if wait:
        input('Press enter to continue...')
    if destroy:
        plt.close()
Example #24
def save_utilizations_in_cache(cache_name, windows, usages):
    with open(cache_name, 'w') as f:
        with flock.Flock(f, flock.LOCK_EX) as lock:
            f.write(windows_list_to_string(windows))
            f.write(','.join([str(usage) for usage in usages]))