Example #1
def df():
    '''disk_usage'''
    df = []
    for part in psutil.disk_partitions(all=False):
        usage = psutil.disk_usage(part.mountpoint)
        percent = str(int(usage.percent)) + '%'
        disk = [part.device, bytes2human(usage.total),
                bytes2human(usage.used), bytes2human(usage.free),
                percent, part.mountpoint]
        df.append(disk)
    return df
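
A minimal usage sketch for the snippet above. It assumes psutil is installed and borrows bytes2human from psutil's private _common module (the original's helper may come from elsewhere); the table formatting is illustrative only.

import psutil
from psutil._common import bytes2human  # assumption: the original may define its own helper

if __name__ == '__main__':
    fmt = '{:<16} {:>8} {:>8} {:>8} {:>6}  {}'
    print(fmt.format('Device', 'Total', 'Used', 'Free', 'Use%', 'Mount'))
    for device, total, used, free, percent, mount in df():
        print(fmt.format(device, total, used, free, percent, mount))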
Example #2
    async def disk_check(self, data, exists=True):
        try:
            disk = psutil.disk_usage(data["disk"])
        except Exception:
            exists = False
            self.logger.error(f"Disk {data['disk']} does not exist")

        if exists:
            percent_used = round(disk.percent)
            total = bytes2human(disk.total)
            free = bytes2human(disk.free)

            if (100 - percent_used) < data["threshold"]:
                status = "failing"
            else:
                status = "passing"

            more_info = f"Total: {total}B, Free: {free}B"
        else:
            status = "failing"
            more_info = f"Disk {data['disk']} does not exist"

        payload = {
            "status": status,
            "more_info": more_info,
        }

        self.logger.debug(payload)

        resp = requests.patch(
            f"{self.astor.server}/api/v1/{data['id']}/checkrunner/",
            json.dumps(payload),
            headers=self.headers,
            timeout=15,
        )

        if (
            status == "failing"
            and data["assigned_task"]
            and data["assigned_task"]["enabled"]
        ):
            from taskrunner import TaskRunner

            task = TaskRunner(
                task_pk=data["assigned_task"]["id"],
                log_level=self.log_level,
                log_to=self.log_to,
            )
            await task.run_while_in_event_loop()

        return status
Example #3
def df():
    '''disk_usage'''
    df = []
    for part in psutil.disk_partitions(all=False):
        usage = psutil.disk_usage(part.mountpoint)
        percent = str(int(usage.percent)) + '%'
        disk = [
            part.device,
            bytes2human(usage.total),
            bytes2human(usage.used),
            bytes2human(usage.free), percent, part.mountpoint
        ]
        df.append(disk)
    return df
Example #4
def get_rusage_dict(children=False):
    # TODO:
    # number of child processes?
    # page size
    # difference between a page out and a major fault?
    # NOTE: values commented with an * are untracked on Linux
    if not resource:
        return {}
    who = resource.RUSAGE_SELF
    if children:
        who = resource.RUSAGE_CHILDREN
    rr = resource.getrusage(who)
    max_rss_human = bytes2human(rr.ru_maxrss * 1024, ndigits=1)

    ret = {'cpu_times': {'user_time': rr.ru_utime,
                         'sys_time': rr.ru_stime},
           'memory': {'max_rss_human': max_rss_human,
                      'max_rss': rr.ru_maxrss,
                      'shared_rss': rr.ru_ixrss,    # *
                      'unshared_rss': rr.ru_idrss,  # *
                      'stack_rss': rr.ru_isrss},    # *
           'page_faults': {'minor_faults': rr.ru_minflt,
                           'major_faults': rr.ru_majflt,
                           'page_outs': rr.ru_nswap},  # *
           'blocking_io': {'input_ops': rr.ru_inblock,
                           'output_ops': rr.ru_oublock},
           'messages': {'sent': rr.ru_msgsnd,  # *
                        'received': rr.ru_msgrcv},  # *
           'signals': {'received': rr.ru_nsignals},  # *
           'ctx_switches': {'voluntary': rr.ru_nvcsw,
                            'involuntary': rr.ru_nivcsw}}
    return ret
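
A quick usage sketch for get_rusage_dict(), assuming the surrounding module guards its import of the POSIX-only resource module (so resource may be None) and provides a bytes2human helper, as the snippet implies.

usage = get_rusage_dict()
if usage:  # an empty dict means the resource module was unavailable
    print('peak RSS :', usage['memory']['max_rss_human'])
    print('user CPU : {:.2f} s'.format(usage['cpu_times']['user_time']))
    print('sys  CPU : {:.2f} s'.format(usage['cpu_times']['sys_time']))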
Example #5
def df():
    '''disk'''

    name_list = [
        'disk_name', 'total_space', 'used_space', 'free_space', 'percent'
    ]
    df = []
    for part in psutil.disk_partitions(all=False):
        usage = psutil.disk_usage(part.mountpoint)
        percent = str(int(usage.percent)) + '%'
        disk = [
            part.device,
            bytes2human(usage.total),
            bytes2human(usage.used),
            bytes2human(usage.free), percent
        ]
        df.append(dict(zip(name_list, disk)))
    return df
Example #6
def global_stats(verbose=True):
    global STATS
    total = {key: 0 for key in STATS[0] if 'human' not in key}
    for stat in STATS:
        for key in total:
            total[key] += stat[key]
    for key in total:
        total[key] /= len(STATS)
    total['size_round_human'] = bytes2human(total['size_round']) + 'Byte'
    total['bps_node_human'] = bytes2human(
        total['byte_per_sec'] / HN_COUNT * 8) + 'bps'
    total['monthly_node_traffic_human'] = bytes2human(
        total['byte_per_sec'] / HN_COUNT * 3600 * 24 * 30.5) + 'B'
    total['time_to_sync_pc_human'] = str(
        math.ceil(total['time_to_sync'] / SLOT_TIME_SEC * 100)) + '%'
    if verbose:
        print(json.dumps(total))
    return total
Example #7
    def get_disks(self):
        disks = defaultdict(dict)
        try:
            for part in psutil.disk_partitions(all=False):
                if os.name == "nt":
                    if "cdrom" in part.opts or part.fstype == "":
                        continue
                usage = psutil.disk_usage(part.mountpoint)
                device = part.device.split("\\", 1)[0]
                disks[device]["device"] = device
                disks[device]["total"] = bytes2human(usage.total)
                disks[device]["used"] = bytes2human(usage.used)
                disks[device]["free"] = bytes2human(usage.free)
                disks[device]["percent"] = int(usage.percent)
                disks[device]["fstype"] = part.fstype
        except Exception:
            disks = {"error": "error getting disk info"}

        return disks
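
An illustrative call site for get_disks(); the class that owns it is not shown in the snippet, so agent below is a hypothetical instance name.

disks = agent.get_disks()  # 'agent' is a placeholder, not a name from the snippet
if 'error' not in disks:
    for dev, info in disks.items():
        print('{}: {} / {} used ({}%)'.format(
            dev, info['used'], info['total'], info['percent']))
else:
    print(disks['error'])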
Example #8
def get_stats():
    global g
    global DOWN_COUNT
    g = Graph()
    g.add_vertices(len(NODES))
    for link in LINKS:
        from_juror = NODES[link[0]]['juror']
        link_color = PALETTE[0] if from_juror else "#aaaaaa"
        g.add_edge(link[0], link[1], link_color=link_color)
    # print(g.degree_distribution())
    giant = max(g.components().sizes())
    running = HN_COUNT - DOWN_COUNT
    stats = {}
    stats['down_count'] = DOWN_COUNT
    stats['size_round'] = int(len(LINKS) * NETWORK_BYTES_PER_ROUND_PER_LINK)
    stats['size_round_human'] = bytes2human(stats['size_round']) + 'Byte'
    stats['byte_per_sec'] = int(stats['size_round'] / ROUND_TIME_SEC)
    stats['size_round_per_node'] = int(stats['size_round'] / HN_COUNT)
    stats['byte_per_sec_per_node'] = int(stats['byte_per_sec'] / HN_COUNT)
    stats['bps_per_node'] = int(stats['byte_per_sec'] / HN_COUNT * 8)
    stats['bps_node_human'] = bytes2human(
        stats['byte_per_sec'] / HN_COUNT * 8) + 'bps'
    stats['monthly_node_traffic_human'] = bytes2human(
        stats['byte_per_sec'] / HN_COUNT * 3600 * 24 * 30.5) + 'B'
    path_length_hist = g.path_length_hist(directed=False)
    stats['mean_path'] = path_length_hist.mean
    stats['time_to_sync'] = math.ceil(stats['mean_path']) * WAIT
    stats['time_to_sync_pc_human'] = str(
        math.ceil(stats['time_to_sync'] / SLOT_TIME_SEC * 100)) + '%'
    stats['disconnected'] = running - giant
    stats['avg_degree'] = mean([node['in'] + node['out'] for node in NODES])
    stats['avg_degree_non_juror'] = mean(
        [node['in'] + node['out'] for node in NODES if not node['juror']])
    # These ones are important: they determine whether forging is possible
    stats['avg_degree_juror'] = mean(
        [node['in'] + node['out'] for node in NODES if node['juror']])
    stats['min_degree_juror'] = min(
        [node['in'] + node['out'] for node in NODES if node['juror']])
    stats['max_degree_juror'] = max(
        [node['in'] + node['out'] for node in NODES if node['juror']])
    return stats
Example #9
def get_rusage_dict(children=False):
    # TODO:
    # number of child processes?
    # page size
    # difference between a page out and a major fault?
    # NOTE: values commented with an * are untracked on Linux
    if not resource:
        return {}
    who = resource.RUSAGE_SELF
    if children:
        who = resource.RUSAGE_CHILDREN
    rr = resource.getrusage(who)
    if sys.platform == 'darwin':
        rss_bytes = rr.ru_maxrss  # darwin breaks posix
    else:
        rss_bytes = rr.ru_maxrss * 1024
    max_rss_human = bytes2human(rss_bytes, ndigits=1)

    ret = {
        'cpu_times': {
            'user_time': rr.ru_utime,
            'sys_time': rr.ru_stime
        },
        'memory': {
            'max_rss_human': max_rss_human,
            'max_rss': rr.ru_maxrss,
            'shared_rss': rr.ru_ixrss,  # *
            'unshared_rss': rr.ru_idrss,  # *
            'stack_rss': rr.ru_isrss  # *
        },
        'page_faults': {
            'minor_faults': rr.ru_minflt,
            'major_faults': rr.ru_majflt,
            'page_outs': rr.ru_nswap  # *
        },
        'blocking_io': {
            'input_ops': rr.ru_inblock,
            'output_ops': rr.ru_oublock
        },
        'messages': {
            'sent': rr.ru_msgsnd,  # *
            'received': rr.ru_msgrcv  # *
        },
        'signals': {
            'received': rr.ru_nsignals  # *
        },
        'ctx_switches': {
            'voluntary': rr.ru_nvcsw,
            'involuntary': rr.ru_nivcsw
        }
    }
    ret['rlimit'] = get_rlimit_dict()
    return ret
Example #10
def disp_online_info(online_info):
    log('Online status:           ' + online_info['online'])
    if online_info['online'] == 'Online':
        log('Online user:             ' + online_info['username'])
        log('Time of login:           ' + str(online_info['login_timestamp']))
        log('Online time:             ' +
            sec2human(online_info['online_time']))
        log('Monthly online time:     ' +
            sec2human(online_info['monthly_online_time']))
        log('Online IP:               ' + online_info['ip'])
        log('IPv4 usage:              ' + bytes2human(online_info['bytes']))
        log('Account balance:         ' + str(online_info['balance']))
Example #11
def main():
    parser = argparse.ArgumentParser(prog="pyuvssim",
                                     description="Compares two YUV I420/IYUV raw video files using the SSIM metric")
    parser.add_argument('base_video', metavar='video1.yuv')
    parser.add_argument('comparison_video', metavar='video2.yuv')
    parser.add_argument('-W', '--width',
                        type=int,
                        action='store',
                        default=1920,
                        nargs='?',
                        help='video width in pixels')
    parser.add_argument('-H', '--height',
                        type=int,
                        action='store',
                        default=1080,
                        nargs='?',
                        help='video height in pixels')
    args = parser.parse_args()

    vid1 = args.base_video
    vid2 = args.comparison_video

    width = args.width
    height = args.height
    frame_size = width * height
    frame_weight = (frame_size * 3) / 2
    video_size = min(os.stat(vid1)[6], os.stat(vid2)[6])
    nb_frames = video_size / frame_weight

    print("Videos information:")
    print("width: {} px".format(width))
    print("height: {} px".format(height))
    print("frame size: {} px^2".format(frame_size))
    print("frame weight: {} ({})".format(utils.bytes2human(frame_weight), frame_weight))
    print("video size: {} ({})".format(utils.bytes2human(video_size), video_size))
    print("number of frames: {}\n".format(nb_frames))

    f1 = open(vid1, 'rb')
    f2 = open(vid2, 'rb')

    print("Pic #, SSIM value")
    for n in range(nb_frames):
        frame_offset = (n * frame_weight)
        im1 = Image.new("RGB", (width, height))
        im2 = Image.new("RGB", (width, height))
        pix1 = im1.load()
        pix2 = im2.load()
        # I420/IYUV: NxN Y plane, then (N/2)x(N/2) U and V planes
        for y in range(height):
            for x in range(width):
                pos_y = frame_offset + (y * width + x)
                pos_u = frame_offset + (y/2 * width/2 + x/2 + frame_size)
                pos_v = frame_offset + (y/2 * width/2 + x/2 + frame_size + frame_size/4)

                f1.seek(pos_y, 0)
                y1 = ord(f1.read(1))
                f1.seek(pos_u, 0)
                u1 = ord(f1.read(1))
                f1.seek(pos_v, 0)
                v1 = ord(f1.read(1))

                f2.seek(pos_y, 0)
                y2 = ord(f2.read(1))
                f2.seek(pos_u, 0)
                u2 = ord(f2.read(1))
                f2.seek(pos_v, 0)
                v2 = ord(f2.read(1))

                pix1[x, y] = utils.yuv2rgb(y1, u1, v1)
                pix2[x, y] = utils.yuv2rgb(y2, u2, v2)
        print("{}, {}".format(n, ssim.compute_ssim(im1, im2)))

    f1.close()
    f2.close()
Example #12
def convert_worker(target_format, message, url, config, bot):
    """Generic process spawned every time user sends a link or a file"""
    input_filename = "".join([config["temp_path"], utils.random_string()])
    output_filename = "".join(
        [config["temp_path"],
         utils.random_string(), ".", target_format])

    # Tell user that we are working
    status_message = bot.reply_to(message, text.starting, parse_mode="HTML")

    def update_status_message(new_text):
        bot.edit_message_text(chat_id=status_message.chat.id,
                              message_id=status_message.message_id,
                              text=new_text,
                              parse_mode="HTML")

    # Try to download URL
    try:
        r = requests.get(url, stream=True)
    except:
        update_status_message(text.error.downloading)
        return

    # Check file size
    if int(r.headers.get("Content-Length", "0")) >= MAXIMUM_FILESIZE_ALLOWED:
        update_status_message(text.error.huge_file)
        return

    # Download the file
    update_status_message(text.downloading)
    chunk_size = 4096
    raw_input_size = 0
    try:
        with open(input_filename, "wb") as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                raw_input_size += chunk_size
                # Download files without Content-Length, but apply standard limit to them
                if raw_input_size >= MAXIMUM_FILESIZE_ALLOWED:
                    update_status_message(text.error.huge_file)
                    utils.rm(input_filename)
                    return
    except:
        update_status_message(text.error.downloading)
        bot.reply_to(message, f"HTTP {r.status_code}")
        return

    # Start ffmpeg
    ffmpeg_process = None
    if target_format == "mp4":
        ffmpeg_process = subprocess.Popen([
            "ffmpeg",
            "-v",
            "error",
            "-threads",
            str(config["ffmpeg_threads"]),
            "-i",
            input_filename,
            "-map",
            "V:0?",  # select video stream
            "-map",
            "0:a?",  # ignore audio if doesn't exist
            "-c:v",
            "libx264",  # specify video encoder
            "-max_muxing_queue_size",
            "9999",  # https://trac.ffmpeg.org/ticket/6375
            "-movflags",
            "+faststart",  # optimize for streaming
            "-preset",
            "veryslow",  # https://trac.ffmpeg.org/wiki/Encode/H.264#a2.Chooseapresetandtune
            "-timelimit",
            "900",  # prevent DoS (exit after 15 min)
            "-vf",
            "pad=ceil(iw/2)*2:ceil(ih/2)*2",  # https://stackoverflow.com/questions/20847674/ffmpeg-libx264-height-not-divisible-by-2#20848224
            output_filename,
        ])
    elif target_format == "png":
        ffmpeg_process = subprocess.Popen([
            "ffmpeg",
            "-v",
            "error",
            "-threads",
            str(config["ffmpeg_threads"]),
            "-thread_type",
            "slice",
            "-i",
            input_filename,
            "-timelimit",
            "60",  # prevent DoS (exit after 15 min)
            output_filename,
        ])

    # Update progress while ffmpeg is alive
    old_progress = ""
    while ffmpeg_process.poll() is None:
        try:
            raw_output_size = utils.filesize(output_filename)
        except FileNotFoundError:
            raw_output_size = 0

        if raw_output_size >= MAXIMUM_FILESIZE_ALLOWED:
            update_status_message(text.error.huge_file)
            ffmpeg_process.kill()
            utils.rm(output_filename)

        input_size = utils.bytes2human(raw_input_size)
        output_size = utils.bytes2human(raw_output_size)

        progress = f"{output_size} / {input_size}"
        # Update progress only if it changed
        if progress != old_progress:
            update_status_message(text.converting.format(progress))
            old_progress = progress
        time.sleep(2)

    # Exit in case of error with ffmpeg
    if ffmpeg_process.returncode != 0:
        update_status_message(text.error.converting)
        # Clean up and close pipe explicitly
        utils.rm(output_filename)
        return

    # Check output file size
    output_size = utils.filesize(output_filename)
    if output_size >= MAXIMUM_FILESIZE_ALLOWED:
        update_status_message(text.error.huge_file)
        # Clean up and close pipe explicitly
        utils.rm(output_filename)
        return

    # Default params for sending operation
    data = {
        "chat_id": message.chat.id,
        "reply_to_message_id": message.message_id
    }

    if target_format == "mp4":
        data.update({"supports_streaming": True})
        # 1. Get video duration in seconds
        video_duration = subprocess.run(
            [
                "ffprobe",
                "-v",
                "error",
                "-select_streams",
                "v:0",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                output_filename,
            ],
            stdout=subprocess.PIPE,
        ).stdout.decode("utf-8").strip()

        video_duration = round(float(video_duration))
        data.update({"duration": video_duration})

        # 2. Get video height and width
        video_props = subprocess.run(
            [
                "ffprobe",
                "-v",
                "error",
                "-select_streams",
                "v:0",
                "-show_entries",
                "stream=width,height",
                "-of",
                "csv=s=x:p=0",
                output_filename,
            ],
            stdout=subprocess.PIPE,
        ).stdout.decode("utf-8").strip()

        video_width, video_height = video_props.split("x")
        data.update({"width": video_width, "height": video_height})

        # 3. Take one frame from the middle of the video
        update_status_message(text.generating_thumbnail)
        thumbnail = "".join(
            [config["temp_path"],
             utils.random_string(), ".jpg"])
        generate_thumbnail_process = subprocess.Popen([
            "ffmpeg",
            "-v",
            "error",
            "-i",
            output_filename,
            "-vcodec",
            "mjpeg",
            "-vframes",
            "1",
            "-an",
            "-f",
            "rawvideo",
            "-ss",
            str(int(video_duration / 2)),
            # keep the limit of 90px height/width (Telegram API) while preserving the aspect ratio
            "-vf",
            "scale='if(gt(iw,ih),90,trunc(oh*a/2)*2)':'if(gt(iw,ih),trunc(ow/a/2)*2,90)'",
            thumbnail,
        ])

        # While process is alive (i.e. is working)
        while generate_thumbnail_process.poll() is None:
            time.sleep(1)

        # Exit in case of error with ffmpeg
        if generate_thumbnail_process.returncode != 0:
            update_status_message(text.error.generating_thumbnail)
            return

        update_status_message(text.uploading)
        requests.post(
            "https://api.telegram.org/bot{}/sendVideo".format(
                config["telegram_token"]),
            data=data,
            files=[
                ("video", (utils.random_string() + ".mp4",
                           open(output_filename, "rb"), "video/mp4")),
                ("thumb", (utils.random_string() + ".jpg",
                           open(thumbnail, "rb"), "image/jpeg")),
            ],
        )
        utils.rm(input_filename)
        utils.rm(output_filename)
        utils.rm(thumbnail)

    elif target_format == "png":
        # Upload to Telegram
        update_status_message(text.uploading)
        requests.post(
            "https://api.telegram.org/bot{}/sendPhoto".format(
                config["telegram_token"]),
            data=data,
            files=[("photo", (utils.random_string() + ".png",
                              open(output_filename, "rb"), "image/png"))],
        )
        requests.post(
            "https://api.telegram.org/bot{}/sendDocument".format(
                config["telegram_token"]),
            data=data,
            files=[("document", (utils.random_string() + ".png",
                                 open(output_filename, "rb"), "image/png"))],
        )
        utils.rm(input_filename)
        utils.rm(output_filename)

    bot.delete_message(message.chat.id, status_message.message_id)
Example #13
    sizes[len(e)].append(k)

sorted_sizes = list(sizes.keys())
sorted_sizes.sort(reverse=True)

total_ids = 0
total_images = 0
for k in sorted_sizes:
    print('{:4} ids with {:4} images'.format(len(sizes[k]), k))
    total_ids += len(sizes[k])
    total_images += len(sizes[k]) * k

print('Total ids: {}'.format(total_ids))
print('Total images: {}'.format(total_images))

print(utils.bytes2human(expected_size),
      'will be needed to download all the images')

a = input('> ')
ids_to_download = sizes[int(a)]  # keys of sizes are ints; input() returns a str
download_imgs = True

with FTP(server) as ftp:
    print('Login into {}'.format(server))
    ftp.login()

    base_dir = ftp.pwd()

    for idx, source_dir in enumerate(source_dirs):
        ftp.cwd(base_dir)
        ftp.cwd(source_dir)
Example #14
def main():
    parser = argparse.ArgumentParser(
        prog="pyuvssim",
        description=
        "Compares two YUV I420/IYUV raw video files using the SSIM metric")
    parser.add_argument('base_video', metavar='video1.yuv')
    parser.add_argument('comparison_video', metavar='video2.yuv')
    parser.add_argument('-W',
                        '--width',
                        type=int,
                        action='store',
                        default=1920,
                        nargs='?',
                        help='video width in pixels')
    parser.add_argument('-H',
                        '--height',
                        type=int,
                        action='store',
                        default=1080,
                        nargs='?',
                        help='video height in pixels')
    args = parser.parse_args()

    vid1 = args.base_video
    vid2 = args.comparison_video

    width = args.width
    height = args.height
    frame_size = width * height
    frame_weight = (frame_size * 3) / 2
    video_size = min(os.stat(vid1)[6], os.stat(vid2)[6])
    nb_frames = video_size / frame_weight

    print("Videos information:")
    print("width: {} px".format(width))
    print("height: {} px".format(height))
    print("frame size: {} px^2".format(frame_size))
    print("frame weight: {} ({})".format(utils.bytes2human(frame_weight),
                                         frame_weight))
    print("video size: {} ({})".format(utils.bytes2human(video_size),
                                       video_size))
    print("number of frames: {}\n".format(nb_frames))

    f1 = open(vid1, 'rb')
    f2 = open(vid2, 'rb')

    print("Pic #, SSIM value")
    for n in range(nb_frames):
        frame_offset = (n * frame_weight)
        im1 = Image.new("RGB", (width, height))
        im2 = Image.new("RGB", (width, height))
        pix1 = im1.load()
        pix2 = im2.load()
        # I420/IYUV: NxN Y plane, then (N/2)x(N/2) U and V planes
        for y in range(height):
            for x in range(width):
                pos_y = frame_offset + (y * width + x)
                pos_u = frame_offset + (y / 2 * width / 2 + x / 2 + frame_size)
                pos_v = frame_offset + (y / 2 * width / 2 + x / 2 +
                                        frame_size + frame_size / 4)

                f1.seek(pos_y, 0)
                y1 = ord(f1.read(1))
                f1.seek(pos_u, 0)
                u1 = ord(f1.read(1))
                f1.seek(pos_v, 0)
                v1 = ord(f1.read(1))

                f2.seek(pos_y, 0)
                y2 = ord(f2.read(1))
                f2.seek(pos_u, 0)
                u2 = ord(f2.read(1))
                f2.seek(pos_v, 0)
                v2 = ord(f2.read(1))

                pix1[x, y] = utils.yuv2rgb(y1, u1, v1)
                pix2[x, y] = utils.yuv2rgb(y2, u2, v2)
        print("{}, {}".format(n, ssim.compute_ssim(im1, im2)))

    f1.close()
    f2.close()