Example #1
import os
import getpass
import shutil

import psutil
from gpuinfo import GPUInfo
# OpSysChecker is a project-specific helper; its import path is not shown in the source.


def __main__():
    op_system_name = os.name
    op_user = getpass.getuser()
    print(op_user)
    if op_system_name == 'posix':
        # os.name == 'posix' also covers Linux, not only macOS
        print("This operating system is a Mac")
    if op_system_name == 'nt':
        print("This operating system is Windows")
    if op_system_name == "java":
        print("This operating system is Java")
    total, used, free = shutil.disk_usage("/")
    print("Total Storage: %d GB" % (total // (2**30)))
    print("Used Storage: %d GB" % (used // (2**30)))
    print("Free Storage: %d GB" % (free // (2**30)))
    print("cpu percentage = {}\nVirtual Storage = {}".format(
        psutil.cpu_percent(), psutil.virtual_memory()))
    for each in psutil.virtual_memory():
        print(each)
    print('memory % used:', psutil.virtual_memory()[2])
    print(GPUInfo.check_empty())
    print(GPUInfo.get_info())
    print(GPUInfo.gpu_usage())
    mem = psutil.virtual_memory()
    print(mem.total)
    print("Ram = {}".format(mem.total / 1024.**3))
    op_check = OpSysChecker()
    print("-----------------\n{}".format(op_check.return_info()))
Example #2
def handler(event, context):
    # NUM_IMAGE, INF_DIR, run_sequential and Scheduler are defined elsewhere in the
    # module; os, time and GPUInfo (from gpuinfo) are imported at module level.
    if isinstance(event['data'], dict) and "num_image" in event['data']:
        global NUM_IMAGE
        NUM_IMAGE = int(event['data']['num_image'])

    # Get GPU counts
    NUM_GPU = 0
    available_devices = GPUInfo.check_empty()
    if available_devices is not None:
        NUM_GPU = len(available_devices)
    print("Current GPU num is {0}".format(NUM_GPU))

    counter = 0
    image_list = list()
    for img in os.listdir(INF_DIR):
        image_list.append(os.path.join(INF_DIR, img))
        counter += 1
        if counter == NUM_IMAGE:
            break

    start = time.time()

    if NUM_GPU == 0:
        run_sequential(image_list)
    else:
        # initialize Scheduler
        scheduler = Scheduler(NUM_GPU)
        # start multiprocessing
        scheduler.start(image_list)

    end = time.time()
    # print ("Time with model loading {0} for {1} images.".format(end - start, NUM_IMAGE))
    return ("Time with model loading {0} for {1} images.".format(
        end - start, NUM_IMAGE))
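The handler in Example #2 relies on a Scheduler class and a run_sequential function defined elsewhere in the project; neither is shown here. A minimal sketch of the interface the handler assumes might look like the following (the infer placeholder and the round-robin worker logic are assumptions, not the original code):

import multiprocessing


def infer(image_path):
    # Placeholder for the project's actual per-image inference call.
    print("processing", image_path)


def run_sequential(image_list):
    # Hypothetical CPU fallback: handle images one by one in the current process.
    for path in image_list:
        infer(path)


class Scheduler:
    # Hypothetical scheduler: one worker process per GPU, images split round-robin.
    def __init__(self, num_gpu):
        self.num_gpu = num_gpu

    def start(self, image_list):
        chunks = [image_list[i::self.num_gpu] for i in range(self.num_gpu)]
        workers = [multiprocessing.Process(target=run_sequential, args=(chunk,))
                   for chunk in chunks]
        for w in workers:
            w.start()
        for w in workers:
            w.join()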
Example #3
from gpuinfo import GPUInfo


def gpu_usage():
    # Total memory per card in MiB (a 32 GB GPU is assumed here).
    total_memory = 32000
    available_device = GPUInfo.check_empty()  # kept from the original, though unused below
    percent, memory = GPUInfo.gpu_usage()
    # Convert each card's absolute memory usage into a fraction of total memory.
    for i in range(len(memory)):
        memory[i] = float(memory[i]) / total_memory
    print(memory)
    return memory


def get_gpu_id(num_gpu=1):
    """Get the IDs of free GPUs.

    :param num_gpu: number of GPUs to use
    :return: gpu_id: ID(s) of the allocated GPUs
    """
    available_device = GPUInfo.check_empty()
    if len(available_device) >= num_gpu:
        gpu_id = available_device[:num_gpu]
    else:
        raise Exception('Only {} free GPUs available!'.format(len(available_device)))
    if num_gpu == 1:
        gpu_id = gpu_id[0]
    return gpu_id
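A short usage sketch for get_gpu_id, assuming the common pattern of pinning the process to the returned device via CUDA_VISIBLE_DEVICES (the surrounding script is an assumption, not part of the source):

import os

if __name__ == '__main__':
    gpu_id = get_gpu_id(num_gpu=1)
    # Restrict this process to the selected free GPU before any CUDA work starts.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    print('running on GPU', gpu_id)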
Example #5
import cv2
import keras_ocr
from gpuinfo import GPUInfo


def load_model():
    # Quota was exceeded, so the model weights are stored on GCS and downloaded
    # to local storage first (left commented out below).
    # bucket_name = 'license_plate_detector_jonathan'
    # file_id = ['yolov3_custom_train_1800.weights', 'crnn_kurapan.h5', 'craft_mlt_25k.h5']

    # storage_client = storage.Client.from_service_account_json('auth_key.json')
    # bucket = storage_client.bucket(bucket_name)

    # for source_blob_name in file_id:
    #     if source_blob_name not in os.listdir():
    #         blob = bucket.blob(source_blob_name)
    #         blob.download_to_filename(source_blob_name)

    # YOLO CONFIGURATIONS
    net = cv2.dnn.readNetFromDarknet('yolov3_custom_train.cfg',
                                     'yolov3_custom_train_1800.weights')
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)

    # Check whether a free GPU is available, otherwise fall back to the CPU.
    # Note: check_empty() returns a list of idle GPU indices, so an empty list
    # would still take the OpenCL branch here.
    gpus = GPUInfo.check_empty()
    print('gpus: ', gpus)
    if gpus is not None:
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
    else:
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    # keras-ocr will load from local file
    detector = keras_ocr.detection.Detector(
        weights='clovaai_general',
        load_from_torch=False,
        optimizer='adam',
        backbone_name='vgg',
        weights_path_local='craft_mlt_25k.h5')
    recognizer = keras_ocr.recognition.Recognizer(
        alphabet=None,
        weights='kurapan',
        build_params=None,
        weights_path='crnn_kurapan.h5')

    pipeline = keras_ocr.pipeline.Pipeline(detector=detector,
                                           recognizer=recognizer)

    # return YOLOv3 model, KerasOCR model
    return [net, pipeline]
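One possible way to use the returned pair, assuming an OpenCV BGR frame and the standard keras-ocr pipeline API (the input size, image file name and lack of post-processing here are assumptions):

net, pipeline = load_model()

frame = cv2.imread('car.jpg')  # hypothetical test image

# YOLOv3 forward pass for plate detection.
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
outputs = net.forward(net.getUnconnectedOutLayersNames())

# keras-ocr expects RGB images and returns (text, box) pairs per input image.
predictions = pipeline.recognize([cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)])
print(predictions[0])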
Example #6
async def process_video(self):
    total_time = sum([video.video_length_flv for video in self.videos])
    max_size = 8000_000 * 8  # Kb
    audio_bitrate = 320
    video_bitrate = (max_size / total_time -
                     audio_bitrate) - 500  # just to be safe
    max_video_bitrate = float(
        8000)  # BiliBili now re-encodes every video anyway
    video_bitrate = int(min(max_video_bitrate, video_bitrate))
    # Input 0 is the looped heat graph image, input 1 the concatenated recording;
    # the filter graph turns the graph into a progress-bar style overlay and
    # burns in the .ass danmaku subtitles.
    ffmpeg_command = f'''ffmpeg -y -loop 1 -t {total_time} \
    -i "{self.output_path()['he_graph']}" \
    -f concat \
    -safe 0 \
    -i "{self.output_path()['concat_file']}" \
    -t {total_time} \
    -filter_complex "
    [0:v][1:v]scale2ref=iw:iw*(main_h/main_w)[color][ref];
    [color]split[color1][color2];
    [color1]hue=s=0[gray];
    [color2]negate=negate_alpha=1[color_neg];
    [gray]negate=negate_alpha=1[gray_neg];
    color=black:d={total_time}[black];
    [black][ref]scale2ref[blackref][ref2];
    [blackref]split[blackref1][blackref2];
    [color_neg][blackref1]overlay=x=t/{total_time}*W-W[color_crop_neg];
    [gray_neg][blackref2]overlay=x=t/{total_time}*W[gray_crop_neg];
    [color_crop_neg]negate=negate_alpha=1[color_crop];
    [gray_crop_neg]negate=negate_alpha=1[gray_crop];
    [ref2][color_crop]overlay=y=main_h-overlay_h[out_color];
    [out_color][gray_crop]overlay=y=main_h-overlay_h[out];
    [out]ass='{self.output_path()['ass']}'[out_sub]" \
    -map "[out_sub]" -map 1:a ''' + \
                     (" -c:v h264_nvenc -preset slow "  # use NVENC when a free GPU is reported
                      if GPUInfo.check_empty() is not None else " -c:v libx264 -preset medium ") + \
                     f'-b:v {video_bitrate}K' + f''' -b:a 320K -ar 44100  "{self.output_path()['danmaku_video']}" \
                 ''' + f'>> "{self.output_path()["video_log"]}" 2>&1'
    await async_wait_output(ffmpeg_command)
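async_wait_output is a project helper that is not shown in this listing; a minimal sketch of what it might do, assuming it simply runs the command in a shell and waits for it without blocking the event loop (name and behavior inferred from the call site):

import asyncio


async def async_wait_output(command):
    # Hypothetical helper: run the shell command and await its completion.
    process = await asyncio.create_subprocess_shell(
        command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await process.communicate()
    return stdout, stderr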
Example #7
from gpuinfo import GPUInfo

available_device = GPUInfo.check_empty()
# available_device is a list of the indices of all GPUs that currently have no running tasks
print(available_device)

percent, memory = GPUInfo.gpu_usage()
# utilization percentage and memory usage of every GPU
print(percent, memory)

min_percent = percent.index(min([percent[i] for i in available_device]))
# the idle GPU with the lowest utilization percentage
print(min_percent)

min_memory = memory.index(min([memory[i] for i in available_device]))
# the idle GPU with the lowest memory usage
print(min_memory)


def video_processor():
    # video_request_queue, danmaku_request_queue and upload_no_danmaku_video are defined
    # elsewhere in the module; os, sys, subprocess and traceback are imported at module level.
    while True:
        request_json = video_request_queue.get()
        try:
            flv_file_path = request_json['RelativePath']
            he_time = request_json['he_time']
            base_file_path = flv_file_path.rpartition('.')[0]
            video_file_path = base_file_path + ".bar.mp4"
            png_file_path = base_file_path + ".png"
            video_log_path = base_file_path + ".video.log"
            total_seconds_str = subprocess.check_output(
                f'ffprobe -v error -show_entries format=duration '
                f'-of default=noprint_wrappers=1:nokey=1 "{flv_file_path}"',
                shell=True)
            total_seconds = float(total_seconds_str)
            max_size = 8000_000 * 8  # Kb
            audio_bitrate = 320
            video_bitrate = (max_size / total_seconds -
                             audio_bitrate) - 500  # just to be safe
            max_video_bitrate = float(4500)
            video_bitrate = int(min(max_video_bitrate, video_bitrate))

            he_time = int(min(float(he_time), total_seconds -
                              5))  # prevent preview image generation error

            ffmpeg_command_img = f"ffmpeg -ss {he_time} -i \"{flv_file_path}\" -vframes 1 \"{png_file_path}\"" \
                                 f" >> \"{video_log_path}\" 2>&1"
            print(ffmpeg_command_img, file=sys.stderr)
            return_value = os.system(ffmpeg_command_img)
            return_text = "Processing completed" if return_value == 0 else "Processing error"
            print(
                f"Room {request_json['RoomId']} at {request_json['StartRecordTime']}: image {return_text}",
                file=sys.stderr)
            if not os.path.isfile(png_file_path):
                print("Video preview file cannot be found", file=sys.stderr)
            else:
                print("uploading quick video", file=sys.stderr)
                request_json['is_update'] = False
                upload_no_danmaku_video(request_json)
            ffmpeg_command = f'FILE=\"{base_file_path}\" ' + ''' \
&& TIME=`ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "${FILE}.flv"`\
&& ffmpeg -loop 1 -t ${TIME} \
-i "${FILE}.he.png" \
-i "${FILE}.flv" \
-filter_complex "
[0:v][1:v]scale2ref=iw:iw*(main_h/main_w)[color][ref];
[color]split[color1][color2];
[color1]hue=s=0[gray];
[color2]negate=negate_alpha=1[color_neg];
[gray]negate=negate_alpha=1[gray_neg];
color=black:d=${TIME}[black];
[black][ref]scale2ref[blackref][ref2];
[blackref]split[blackref1][blackref2];
[color_neg][blackref1]overlay=x=t/${TIME}*W-W[color_crop_neg];
[gray_neg][blackref2]overlay=x=t/${TIME}*W[gray_crop_neg];
[color_crop_neg]negate=negate_alpha=1[color_crop];
[gray_crop_neg]negate=negate_alpha=1[gray_crop];
[ref2][color_crop]overlay=y=main_h-overlay_h[out_color];
[out_color][gray_crop]overlay=y=main_h-overlay_h[out];
[out]ass='${FILE}.ass'[out_sub]" \
-map "[out_sub]" -map 1:a ''' + \
                             (" -c:v h264_nvenc -preset slow "
                              if GPUInfo.check_empty() is not None else " -c:v libx264 -preset medium ") + \
                             f'-b:v {video_bitrate}K' + ''' -b:a 320K -ar 44100  "${FILE}.bar.mp4" \
            ''' + f'>> "{video_log_path}" 2>&1'
            print(ffmpeg_command, file=sys.stderr)
            return_value = os.system(ffmpeg_command)
            return_text = "Processing completed" if return_value == 0 else "Processing error"
            print(
                f"Room {request_json['RoomId']} at {request_json['StartRecordTime']}: video {return_text}",
                file=sys.stderr)
            if not os.path.isfile(video_file_path):
                print("Video file cannot be found", file=sys.stderr)
            else:
                print("Updating full video", file=sys.stderr)
                request_json['is_update'] = True
                upload_no_danmaku_video(request_json)
        except Exception as err:
            # noinspection PyBroadException
            try:
                print(
                    f"Room {request_json['RoomId']} at {request_json['StartRecordTime']}: {err}",
                    file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
            except Exception:
                print(f"Unknown video exception", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
        finally:
            print(f"Video queue length: {danmaku_request_queue.qsize()}",
                  file=sys.stderr)
            sys.stderr.flush()
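video_processor blocks on video_request_queue, so the project presumably runs it in a background worker; a minimal sketch of such wiring using the standard library (the queue objects, daemon thread and sample request values here are assumptions matching the names in the snippet above):

import queue
import threading

# Hypothetical module-level queues matching the names used in video_processor().
video_request_queue = queue.Queue()
danmaku_request_queue = queue.Queue()

# Run the processing loop in a daemon thread and feed it a request.
worker = threading.Thread(target=video_processor, daemon=True)
worker.start()
video_request_queue.put({'RelativePath': 'rec/room1.flv', 'he_time': '30',
                         'RoomId': 1, 'StartRecordTime': '2021-01-01T00:00:00'})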
Example #9
    else:
        # 'd', 'gpu_id' and 'execute_file' come from earlier in this function (only partially
        # shown); subprocess, time, Path and GPUInfo are imported at module level.
        # command = ['nohup', 'python -u', d['command']+' --gpu %s'%(str(gpu_id)), '&disown']
        command = ['nohup', d['command']+' --gpu %s'%(str(gpu_id)), '&disown']
        # the string form below (run with shell=True) is the one actually used
        command = 'nohup '+d['command']+' --gpu %s'%(str(gpu_id))#+' &disown'
    
    # * Call it from terminal
    print(command)
    subprocess.call(command, shell=True)

    # * Delete the execute file
    Path(execute_file).unlink()

if __name__ == '__main__':

    # * Keep script running in background indefinitely
    while True:
        
        # * Check available GPUs
        gpus_available = GPUInfo.check_empty()
        if gpus_available:
            
            # * For each available GPU, attempt to run an experiment on it.
            for gpu_id in gpus_available:
                exp_file = find_oldest_experiment()

                # * An experiment is only launched if it exists 
                if exp_file:
                    launch_and_remove_experiment(exp_file, gpu_id=gpu_id)
        
        # * Check for new experiments every N seconds
        time.sleep(5)
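find_oldest_experiment is not shown in this listing; a minimal sketch of what it might do, assuming queued experiments are files in a directory and the oldest should run first (the directory name and file pattern are assumptions):

from pathlib import Path


def find_oldest_experiment(queue_dir='experiments'):
    # Hypothetical: return the oldest queued experiment file, or None if the queue is empty.
    files = sorted(Path(queue_dir).glob('*.json'), key=lambda p: p.stat().st_mtime)
    return files[0] if files else None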