Example #1
import os
import getpass
import shutil
import psutil
from gpuinfo import GPUInfo

def main():  # renamed from __main__(): a function named __main__ is never called automatically
    op_system_name = os.name
    op_user = getpass.getuser()
    print(op_user)
    if op_system_name == 'posix':
        print("This operating system is POSIX (macOS or Linux)")
    elif op_system_name == 'nt':
        print("This operating system is Windows")
    elif op_system_name == 'java':
        print("This operating system is Java (Jython)")
    total, used, free = shutil.disk_usage("/")
    print("Total Storage: %d GB" % (total // (2**30)))
    print("Used Storage: %d GB" % (used // (2**30)))
    print("Free Storage: %d GB" % (free // (2**30)))
    print("cpu percentage = {}\nVirtual Storage = {}".format(
        psutil.cpu_percent(), psutil.virtual_memory()))
    for each in psutil.virtual_memory():
        print(each)
    print('memory % used:', psutil.virtual_memory()[2])
    print(GPUInfo.check_empty())
    print(GPUInfo.get_info())
    print(GPUInfo.gpu_usage())
    mem = psutil.virtual_memory()
    print(mem.total)
    print("Ram = {}".format(mem.total / 1024.**3))
    op_check = OpSysChecker()  # assumed: the helper class whose __init__ appears in Example #4
    print("-----------------\n{}".format(op_check.return_info()))

if __name__ == '__main__':
    main()
Example #2
import os
import re
import time

import numpy as np
from gpuinfo import GPUInfo as gi  # assumed alias: the snippet below calls gi.get_info()

def avaiable_gpus(detect_time=3, cpu_ratio=0.5, mem_ratio=0.5):
    """
    Get available GPU device ids.
    :param detect_time: number of seconds to sample usage
    :param cpu_ratio: upper bound on average utilization, as a fraction of 100%
    :param mem_ratio: upper bound on average memory use, as a fraction of total memory
    :return: a list of available GPU ids, least loaded first
    """
    assert isinstance(detect_time, int) and cpu_ratio <= 1. and mem_ratio <= 1.
    # print('detecting valid gpus in %d seconds' % detect_time)

    # 1. Use a regex to read the total memory of a single GPU from nvidia-smi
    #    (assumes all cards are identical)
    total_mem = int(
        re.findall(r'([0-9]+)MiB \|',
                   os.popen('nvidia-smi -i 0').readlines()[8])[0])

    # 2. Detect the GPUs that satisfy the resource requirements
    pids, pcpu, mem, gpu_id = gi.get_info()
    for i in range(detect_time - 1):
        time.sleep(1)
        pids, _pcpu, _mem, gpu_id = gi.get_info()
        _pcpu, _mem = np.asarray(_pcpu), np.asarray(_mem)
        pcpu = pcpu + _pcpu
        mem = mem + _mem
    pcpu, mem = np.asarray(pcpu) / detect_time, np.asarray(mem) / detect_time
    valid_gpus = np.argwhere((pcpu <= cpu_ratio * 100) & (
        mem <= mem_ratio * total_mem)).reshape(-1).tolist()
    valid_gpus = sorted(valid_gpus, key=lambda x: mem[x] * 100 + pcpu[x])

    # 3. Print info
    # info = ['GPU%d: %d%%-%.1fG' % (x[0], x[1], x[2] / 1024) for x in zip(valid_gpus, pcpu[valid_gpus], mem[valid_gpus])]
    # print('valid gpus: | '.join(info))
    return valid_gpus
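A hypothetical usage sketch for Example #2 (it assumes the imports above and at least one qualifying GPU): take the least-loaded device and pin the process to it.

import os

gpus = avaiable_gpus(detect_time=3, cpu_ratio=0.5, mem_ratio=0.5)
if gpus:
    # the list is sorted least-loaded first, so gpus[0] is the best candidate
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpus[0])
else:
    print("no GPU satisfies the usage bounds")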
Example #3
from gpuinfo import GPUInfo

def gpu_usage():
    total_memory = 32000  # total memory of one card in MiB (hard-coded for a 32 GB GPU)
    available_device = GPUInfo.check_empty()  # unused here; kept from the original
    percent, memory = GPUInfo.gpu_usage()
    for i in range(len(memory)):
        memory[i] = float(memory[i]) / total_memory  # convert MiB to a fraction of the total
    print(memory)
    return memory
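The 32000 MiB total in Example #3 is hard-coded. A sketch that reads the real total from nvidia-smi instead (assuming the binary is on PATH; the query flags are standard nvidia-smi options):

import subprocess

def total_gpu_memory_mib(gpu_index=0):
    # ask nvidia-smi for the total memory of one GPU, in MiB
    out = subprocess.check_output(
        ["nvidia-smi", "-i", str(gpu_index),
         "--query-gpu=memory.total", "--format=csv,noheader,nounits"])
    return int(out.decode().strip())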
Example #4
 # __init__ of a system-info helper class (assumed to be the OpSysChecker used
 # in Example #1); relies on os, getpass, shutil, psutil and gpuinfo.GPUInfo
 def __init__(self):
     self.op_system_name = os.name
     self.user_name = getpass.getuser()
     # self.total, self.used, self.free = shutil.disk_usage("/")
     simp_list = shutil.disk_usage("/")
     self.total = simp_list[0] // (2**30)  # GiB
     self.used = simp_list[1] // (2**30)
     self.free = simp_list[2] // (2**30)
     self.Gpu_is_empty = GPUInfo.check_empty()  # fixed: was get_info(), which duplicated the next line
     self.Gpu_info = GPUInfo.get_info()
     self.mem = psutil.virtual_memory()
     self.mem_total = self.mem.total
     self.Virtual_memory = self.mem_total / (1024**3)  # RAM in GiB
Example #5
import os
import sys

import numpy as np
from gpuinfo import GPUInfo

def set_GPU(num_of_GPUs):
    # GPUs using less than 1500 MiB of memory are considered available
    current_memory_gpu = GPUInfo.gpu_usage()[1]
    list_available_gpu = np.where(
        np.array(current_memory_gpu) < 1500)[0].astype('str').tolist()
    if len(list_available_gpu) < num_of_GPUs:
        print("==============Warning==============")
        print("Your process has been terminated")
        print("Please decrease the number of GPUs you are using")
        print(
            f"Number of devices available:\t{len(list_available_gpu)} gpu(s)")
        print(f"Number of devices requested:\t{num_of_GPUs} gpu(s)")
        sys.exit()
    # fixed: the original kept the *excess* GPUs when more were available than
    # requested; take the first num_of_GPUs instead
    list_available_gpu = list_available_gpu[:num_of_GPUs]
    current_available_gpu = ",".join(list_available_gpu)
    print("[DEBUG]***********************************************")
    print(f"[DEBUG]You are using GPU(s): {current_available_gpu}")
    print("[DEBUG]***********************************************")
    os.environ["CUDA_VISIBLE_DEVICES"] = current_available_gpu
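A usage note on Example #5 (an assumption about the surrounding projects, not stated in the snippet): CUDA_VISIBLE_DEVICES is only read when the CUDA runtime initializes, so set_GPU must run before the framework first touches the GPU. A PyTorch-flavoured sketch:

set_GPU(1)    # pin the process to one available GPU first

import torch  # only then initialize the CUDA-aware framework
model = torch.nn.Linear(8, 8).cuda()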
Example #6
import os
import time

from gpuinfo import GPUInfo

# INF_DIR, NUM_IMAGE, Scheduler and run_sequential come from the surrounding module
def handler(event, context):

    if isinstance(event['data'], dict) and "num_image" in event['data']:
        global NUM_IMAGE
        NUM_IMAGE = int(event['data']['num_image'])

    # Get the GPU count
    NUM_GPU = 0
    available_devices = GPUInfo.check_empty()
    if available_devices is not None:
        NUM_GPU = len(available_devices)
    print("Current GPU num is {0}".format(NUM_GPU))

    counter = 0
    image_list = list()
    for img in os.listdir(INF_DIR):
        image_list.append(os.path.join(INF_DIR, img))
        counter += 1
        if counter == NUM_IMAGE:
            break

    start = time.time()

    if NUM_GPU == 0:
        run_sequential(image_list)
    else:
        # initialize Scheduler
        scheduler = Scheduler(NUM_GPU)
        # start multiprocessing
        scheduler.start(image_list)

    end = time.time()
    # print ("Time with model loading {0} for {1} images.".format(end - start, NUM_IMAGE))
    return ("Time with model loading {0} for {1} images.".format(
        end - start, NUM_IMAGE))
Example #7
import os
import sys

import numpy as np
import torch

def set_GPU(num_of_GPUs, log, memory_restraint=0):
    try:
        from gpuinfo import GPUInfo
        current_memory_gpu = GPUInfo.gpu_usage()[1]
        if not memory_restraint:
            # note: np.where on the raw values keeps only GPUs whose memory
            # usage is nonzero, as in the original
            list_available_gpu = np.where(
                np.array(current_memory_gpu))[0].astype('str').tolist()

        else:
            list_available_gpu = np.where(
                np.array(current_memory_gpu) < memory_restraint)[0].astype(
                    'str').tolist()

    except Exception:
        if log is not None:  # guard added: log is None-checked later in this function
            log.write_log(
                "Cannot find nvidia-smi, please add it to your PATH environment variable",
                message_type=0)
        print(
            "[INFO] Cannot find nvidia-smi, please add it to your PATH environment variable"
        )
        if torch.cuda.is_available():
            list_available_gpu = [str(i) for i in range(num_of_GPUs)]

        else:
            list_available_gpu = []

    list_gpu_using = list_available_gpu[:num_of_GPUs]

    if len(list_available_gpu) < num_of_GPUs and len(list_available_gpu) > 0:
        print("==============Warning==============")
        print("Your process has been terminated")
        print("Please decrease the number of GPUs you are using")
        print(
            f"Number of devices available:\t{len(list_available_gpu)} gpu(s)")
        print(f"Number of devices requested:\t{num_of_GPUs} gpu(s)")
        if log is not None:
            log.write_log(
                f"Number of devices available:\t{len(list_available_gpu)} gpu(s) < number requested:\t{num_of_GPUs} gpu(s)",
                message_type=2)
        sys.exit()

    elif num_of_GPUs <= len(list_available_gpu) and num_of_GPUs != 0:
        current_available_gpu = ",".join(list_gpu_using)

    elif num_of_GPUs == 0 or len(list_available_gpu) == 0:
        current_available_gpu = "-1"

    print("[INFO] ***********************************************")

    if len(list_gpu_using) > 0:
        tmp_message = f"[INFO] You are using GPU(s): {current_available_gpu}"
    else:
        tmp_message = "[INFO] You are using CPU !"

    print(tmp_message)
    if log is not None:
        log.write_log(tmp_message, message_type=0)

    print("[INFO] ***********************************************")
    os.environ["CUDA_VISIBLE_DEVICES"] = current_available_gpu
Example #8
from gpuinfo import GPUInfo

def get_pids():
    # Gets all PIDs on all GPUs as a dictionary
    # Each key is a GPU ID
    info = GPUInfo.get_info()

    pids = info[0]  # mapping of PID -> list of GPU IDs
    pids = {value[0]: key for key, value in pids.items()}  # invert: GPU ID -> PID
    return pids
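A hypothetical usage sketch for Example #8: look up which process occupies GPU 0.

pids = get_pids()  # {gpu_id: pid}
if 0 in pids:
    print("GPU 0 is used by PID", pids[0])
else:
    print("GPU 0 has no registered process")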
Example #9
    def get_gpu_info(self):
        print("GPUINFO: Start loop")
        while self.gpu_thread:
            with self.thread_lock:  # same as acquire()/release(), but exception-safe
                self.gpu_percent, self.gpu_memory = GPUInfo.gpu_usage()
            time.sleep(2)

        print("GPUINFO: End loop")
Example #10
from gpuinfo import GPUInfo

def get_gpu_id(num_gpu=1):
    """Get the IDs of idle GPUs.
    :param num_gpu: number of GPUs to use
    :return: gpu_id: the ID (or list of IDs) of the allocated GPUs
    """
    available_device = GPUInfo.check_empty()
    if len(available_device) >= num_gpu:
        gpu_id = available_device[:num_gpu]
    else:
        raise Exception('Only {} GPUs are available!'.format(len(available_device)))
    if num_gpu == 1:
        gpu_id = gpu_id[0]
    return gpu_id
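A hypothetical usage sketch for Example #10: reserve one idle GPU and expose only it to CUDA.

import os

gpu_id = get_gpu_id(num_gpu=1)  # a single ID when num_gpu == 1
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)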
Example #11
import cv2
import keras_ocr
from gpuinfo import GPUInfo

def load_model():
    # Exceeded the quota, so the data lives in GCS and is downloaded to local
    # disk first (the download code is kept commented out below)
    # bucket_name = 'license_plate_detector_jonathan'
    # file_id = ['yolov3_custom_train_1800.weights', 'crnn_kurapan.h5', 'craft_mlt_25k.h5']

    # storage_client = storage.Client.from_service_account_json('auth_key.json')
    # bucket = storage_client.bucket(bucket_name)

    # for source_blob_name in file_id:
    # if source_blob_name not in os.listdir():
    # blob = bucket.blob(source_blob_name)
    # blob.download_to_filename(source_blob_name)

    # YOLO CONFIGURATIONS
    net = cv2.dnn.readNetFromDarknet('yolov3_custom_train.cfg',
                                     'yolov3_custom_train_1800.weights')
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)

    # check if GPU is available else use CPU
    gpus = GPUInfo.check_empty()
    print('gpus: ', gpus)
    if gpus is not None:
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
    else:
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    # keras-ocr will load from local file
    detector = keras_ocr.detection.Detector(
        weights='clovaai_general',
        load_from_torch=False,
        optimizer='adam',
        backbone_name='vgg',
        weights_path_local='craft_mlt_25k.h5')
    recognizer = keras_ocr.recognition.Recognizer(
        alphabet=None,
        weights='kurapan',
        build_params=None,
        weights_path='crnn_kurapan.h5')

    pipeline = keras_ocr.pipeline.Pipeline(detector=detector,
                                           recognizer=recognizer)

    # return YOLOv3 model, KerasOCR model
    return [net, pipeline]
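One hedged aside on Example #11: DNN_TARGET_OPENCL runs on OpenCL, not CUDA. On an OpenCV build compiled with CUDA support (the DNN_BACKEND_CUDA constant exists from OpenCV 4.2 onward), a sketch like this would target the GPU directly:

import cv2

net = cv2.dnn.readNetFromDarknet('yolov3_custom_train.cfg',
                                 'yolov3_custom_train_1800.weights')
try:
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
except AttributeError:
    # build without CUDA support: fall back to the CPU target
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)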
Example #12
    def __getitem__(self, index):
        vid_dir, label, frame_count, class_name = self.data_list[index]
        buffer = 0  # placeholder; np.empty((self.clip_len, self.resize_height, self.resize_width, 3), np.dtype('float32'))
        text = 0
        try:
            if 'text_only' != self.method:
                buffer = self.load_frames(vid_dir, frame_count)
                if self.do_crop:
                    buffer = self.spatial_crop(buffer, self.crop_size)
                buffer = self.normalize(buffer)
                buffer = self.to_tensor(buffer)
            if 'joint' == self.method or 'text_only' == self.method:
                key = (vid_dir.split('/')[-1]).split('.')[0]
                text = get_text_description(self.dictionary_pickle, key)
                text = text.astype('float32')
        except Exception as e:
            print("Error ", e)
            from gpuinfo import GPUInfo
            print(GPUInfo.gpu_usage())
            #sys.exit()

        return buffer, label, text
Example #13
 async def process_video(self):
     total_time = sum([video.video_length_flv for video in self.videos])
     max_size = 8000_000 * 8  # 8 GB expressed in kilobits
     audio_bitrate = 320
     video_bitrate = (max_size / total_time -
                      audio_bitrate) - 500  # just to be safe
     max_video_bitrate = float(
         8000)  # BiliBili now re-encodes every video anyway
     video_bitrate = int(min(max_video_bitrate, video_bitrate))
     ffmpeg_command = f'''ffmpeg -y -loop 1 -t {total_time} \
     -i "{self.output_path()['he_graph']}" \
     -f concat \
     -safe 0 \
     -i "{self.output_path()['concat_file']}" \
     -t {total_time} \
     -filter_complex "
     [0:v][1:v]scale2ref=iw:iw*(main_h/main_w)[color][ref];
     [color]split[color1][color2];
     [color1]hue=s=0[gray];
     [color2]negate=negate_alpha=1[color_neg];
     [gray]negate=negate_alpha=1[gray_neg];
     color=black:d={total_time}[black];
     [black][ref]scale2ref[blackref][ref2];
     [blackref]split[blackref1][blackref2];
     [color_neg][blackref1]overlay=x=t/{total_time}*W-W[color_crop_neg];
     [gray_neg][blackref2]overlay=x=t/{total_time}*W[gray_crop_neg];
     [color_crop_neg]negate=negate_alpha=1[color_crop];
     [gray_crop_neg]negate=negate_alpha=1[gray_crop];
     [ref2][color_crop]overlay=y=main_h-overlay_h[out_color];
     [out_color][gray_crop]overlay=y=main_h-overlay_h[out];
     [out]ass='{self.output_path()['ass']}'[out_sub]" \
     -map "[out_sub]" -map 1:a ''' + \
                      (" -c:v h264_nvenc -preset slow "
                       if GPUInfo.check_empty() is not None else " -c:v libx264 -preset medium ") + \
                      f'-b:v {video_bitrate}K' + f''' -b:a 320K -ar 44100  "{self.output_path()['danmaku_video']}" \
                 ''' + f'>> "{self.output_path()["video_log"]}" 2>&1'
     await async_wait_output(ffmpeg_command)
Example #14
from random import random

import psutil
from gpuinfo import GPUInfo

file = open("RendimientoAgrupados.txt", "w")
tiempo = int(input("Enter the sampling duration in seconds [e.g. 120]: "))
intervalo = float(
    input("Enter the interval between samples in seconds [e.g. 0.5]: "))
muestras = int(tiempo / intervalo)

while True:
    selecOption = input("Run an experiment? [y / n]: ")
    if selecOption == "n":
        break
    else:
        label = input("Experiment name [e.g. 320x240]: ")
        for x in range(muestras):
            cpu_per = psutil.cpu_percent(interval=intervalo)  # CPU usage %
            aux = psutil.virtual_memory()
            ram = aux[2]  # RAM usage %
            gpu_percent, gpu_memory = GPUInfo.gpu_usage()
            gpu = gpu_percent[0] + round(random(), 1)  # GPU usage %
            # GPU VRAM usage %; vram (total VRAM in MiB) is defined earlier in the original script
            gpu_vram = round((gpu_memory[0] * 100) / vram, 1)
            file.write(str(cpu_per) + " ")
            file.write(str(ram) + " ")
            file.write(str(gpu) + " ")
            file.write(str(gpu_vram) + " ")
            file.write(label + "\n")

file.close()
print("TiempoTranscurrido(s)=" + str(tiempo) + " NúmeroMuestras=" +
      str(muestras))
Example #15
import subprocess
import time
from shutil import copyfile

from gpuinfo import GPUInfo

time.sleep(8)
dest = '/home/storms/Progress/Images/gauges/temp_gaugej_br.png'

src = '/home/storms/Progress/Images/gauges/bottom_right/temp_gaugej_br_' + mem_str + '.png'  # mem_str is computed earlier in the original script

copyfile(src, dest)

gpu_temp = int(
    subprocess.check_output(["nvidia-settings -query [gpu:0]/GPUCoreTemp -t"],
                            shell=True).decode("utf-8"))
gpu_temp = str(14 + 2 * round(gpu_temp / 2))
src = '/home/storms/Progress/Images/gauges/top_right/temp_gaugej_tr_' + gpu_temp + '.png'
dest = '/home/storms/Progress/Images/gauges/temp_gaugej_tr.png'

copyfile(src, dest)

use_stats = GPUInfo.gpu_usage()
gpu_usage = use_stats[0][0]
vram_usage = round(use_stats[1][0] / 1000)

dest = '/home/storms/Progress/Images/gauges/zotac_symbol.png'
src = '/home/storms/Progress/Images/gauges/zotac_body/zotac_gaming_logo' + str(
    gpu_usage) + '-01.png'
copyfile(src, dest)

dest = '/home/storms/Progress/Images/gauges/Zotac_Wings.png'
src = '/home/storms/Progress/Images/gauges/Zotac_Wings/zotac_wings' + str(
    vram_usage) + '.png'
copyfile(src, dest)
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """
Section 2: Update Progress Bars 
""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """"""
Example #16
    print("Please install GPU version of TF")

from model.YOLOv4 import YOLOv4
import numpy as np
import gc
from tqdm import tqdm
from gpuinfo import GPUInfo

x = 320
y = 320
inp = np.ones((4, x, y, 3), dtype=np.float64)
network = YOLOv4(side=x)
network.compile(loss="mse", optimizer="sgd", metrics="acc")

small, medium, large = network(inp)
print(GPUInfo.get_info())
print((x, y), " work and give :", small.shape, medium.shape, large.shape)
network.summary()
print(network.layers[-1].output_shape)

from dataset import Dataset
dataset = Dataset(4,
                  network.layers[-1].output_shape,
                  shape=network.layers[0].input_shape[1:3])
print("input shape is :", dataset.shape)

image_path = dataset.get_dir("test")
labels = dataset.get_label_data("test")

print(np.array(image_path).shape)
print(labels[0])
Example #17
from gpuinfo import GPUInfo

def gpu_info():
    try:
        percent, memory = GPUInfo.gpu_usage()
    except ValueError:
        return "Error when reading GPU utilization"
    return "percent: %r, memory: %r" % (percent, memory)
Example #18
    elif cpu < 100:  # (assumption: the start of this snippet is truncated in the source)
        cpuStr = " " + str(
            cpu
        )  # same padding as above but with only 1 space, because e.g. 85.6 has only 4 characters
    else:
        cpuStr = str(
            cpu
        )  # 100.0 is 5 characters, so no leading spaces are needed

    # CPU speed
    cpufreq = round(
        psutil.cpu_freq().current / 1000, 1
    )  # CPU frequency in GHz (1 GHz = 1000 MHz), rounded to one decimal place
    cpufreqStr = "  " + str(
        cpufreq)  # convert to a string padded to 5 characters in total

    # GPU Info
    percent, memory = GPUInfo.gpu_usage(
    )  # get GPU utilization and used memory info

    # GPU Utilization
    percentNum = percent[0]  # obtain number from the percent list

    if percentNum < 10:  # same as for CPU (make 5 spaces)
        percentStr = "    " + str(percentNum)
    elif percentNum < 100:
        percentStr = "   " + str(percentNum)
    else:
        percentStr = "  " + str(percentNum)

    # GPU Used Memory
    memoryNum = round(
        memory[0] / 1024,
        1)  # first entry of the memory list, converted from MiB to GiB and rounded to one decimal place
Example #19
from gpuinfo import GPUInfo

available_device = GPUInfo.check_empty()
# available_device is a list of the IDs of every GPU that has no running task
print(available_device)

percent, memory = GPUInfo.gpu_usage()
# the utilization percentage and memory usage of every GPU
print(percent, memory)

min_percent = percent.index(min([percent[i] for i in available_device]))
# the idle GPU with the lowest utilization
print(min_percent)

min_memory = memory.index(min([memory[i] for i in available_device]))
# the idle GPU with the least memory in use

print(min_memory)
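A short follow-on sketch for Example #19 (hypothetical): pin the process to the idle GPU with the most free memory.

import os

# note: list.index returns the first match, which could be a busy GPU
# if a busy one happens to report the same value
os.environ["CUDA_VISIBLE_DEVICES"] = str(min_memory)
print("work will run on GPU", min_memory)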
Example #20
    else:  # assumption: tail of launch_and_remove_experiment, whose start is truncated in the source
        # command = ['nohup', 'python -u', d['command']+' --gpu %s'%(str(gpu_id)), '&disown']
        command = ['nohup', d['command']+' --gpu %s'%(str(gpu_id)), '&disown']
        # the string form below overwrites the list above: shell=True needs a single string
        command = 'nohup '+d['command']+' --gpu %s'%(str(gpu_id))  # +' &disown'
    
    # * Call it from terminal
    print(command)
    subprocess.call(command, shell=True)

    # * Delete the execute file
    Path(execute_file).unlink()

if __name__ == '__main__':

    # * Keep script running in background indefinitely
    while True:
        
        # * Check available GPUs
        gpus_available = GPUInfo.check_empty()
        if gpus_available:
            
            # * For each available GPU, attempt to run an experiment on it.
            for gpu_id in gpus_available:
                exp_file = find_oldest_experiment()

                # * An experiment is only launched if it exists 
                if exp_file:
                    launch_and_remove_experiment(exp_file, gpu_id=gpu_id)
        
        # * Check for new experiments every N seconds
        time.sleep(5)
Example #21
def video_processor():
    while True:
        request_json = video_request_queue.get()
        try:
            flv_file_path = request_json['RelativePath']
            he_time = request_json['he_time']
            base_file_path = flv_file_path.rpartition('.')[0]
            video_file_path = base_file_path + ".bar.mp4"
            png_file_path = base_file_path + ".png"
            video_log_path = base_file_path + ".video.log"
            total_seconds_str = subprocess.check_output(
                f'ffprobe -v error -show_entries format=duration '
                f'-of default=noprint_wrappers=1:nokey=1 "{flv_file_path}"',
                shell=True)
            total_seconds = float(total_seconds_str)
            max_size = 8000_000 * 8  # 8 GB expressed in kilobits
            audio_bitrate = 320
            video_bitrate = (max_size / total_seconds -
                             audio_bitrate) - 500  # just to be safe
            max_video_bitrate = float(4500)
            video_bitrate = int(min(max_video_bitrate, video_bitrate))

            he_time = int(min(float(he_time), total_seconds -
                              5))  # prevent preview image generation error

            ffmpeg_command_img = f"ffmpeg -ss {he_time} -i \"{flv_file_path}\" -vframes 1 \"{png_file_path}\"" \
                                 f" >> \"{video_log_path}\" 2>&1"
            print(ffmpeg_command_img, file=sys.stderr)
            return_value = os.system(ffmpeg_command_img)
            return_text = "Processing completed" if return_value == 0 else "Processing error"
            print(
                f"Room {request_json['RoomId']} at {request_json['StartRecordTime']}: image {return_text}",
                file=sys.stderr)
            if not os.path.isfile(png_file_path):
                print("Video preview file cannot be found", file=sys.stderr)
            else:
                print("uploading quick video", file=sys.stderr)
                request_json['is_update'] = False
                upload_no_danmaku_video(request_json)
            ffmpeg_command = f'FILE=\"{base_file_path}\" ' + ''' \
&& TIME=`ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "${FILE}.flv"`\
&& ffmpeg -loop 1 -t ${TIME} \
-i "${FILE}.he.png" \
-i "${FILE}.flv" \
-filter_complex "
[0:v][1:v]scale2ref=iw:iw*(main_h/main_w)[color][ref];
[color]split[color1][color2];
[color1]hue=s=0[gray];
[color2]negate=negate_alpha=1[color_neg];
[gray]negate=negate_alpha=1[gray_neg];
color=black:d=${TIME}[black];
[black][ref]scale2ref[blackref][ref2];
[blackref]split[blackref1][blackref2];
[color_neg][blackref1]overlay=x=t/${TIME}*W-W[color_crop_neg];
[gray_neg][blackref2]overlay=x=t/${TIME}*W[gray_crop_neg];
[color_crop_neg]negate=negate_alpha=1[color_crop];
[gray_crop_neg]negate=negate_alpha=1[gray_crop];
[ref2][color_crop]overlay=y=main_h-overlay_h[out_color];
[out_color][gray_crop]overlay=y=main_h-overlay_h[out];
[out]ass='${FILE}.ass'[out_sub]" \
-map "[out_sub]" -map 1:a ''' + \
                             (" -c:v h264_nvenc -preset slow "
                              if GPUInfo.check_empty() is not None else " -c:v libx264 -preset medium ") + \
                             f'-b:v {video_bitrate}K' + ''' -b:a 320K -ar 44100  "${FILE}.bar.mp4" \
            ''' + f'>> "{video_log_path}" 2>&1'
            print(ffmpeg_command, file=sys.stderr)
            return_value = os.system(ffmpeg_command)
            return_text = "Processing completed" if return_value == 0 else "Processing error"
            print(
                f"Room {request_json['RoomId']} at {request_json['StartRecordTime']}: video {return_text}",
                file=sys.stderr)
            if not os.path.isfile(video_file_path):
                print("Video file cannot be found", file=sys.stderr)
            else:
                print("Updating full video", file=sys.stderr)
                request_json['is_update'] = True
                upload_no_danmaku_video(request_json)
        except Exception as err:
            # noinspection PyBroadException
            try:
                print(
                    f"Room {request_json['RoomId']} at {request_json['StartRecordTime']}: {err}",
                    file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
            except Exception:
                print(f"Unknown video exception", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
        finally:
            print(f"Video queue length: {danmaku_request_queue.qsize()}",
                  file=sys.stderr)
            sys.stderr.flush()
Example #22
import time

import numpy as np
import psutil
import torch
from gpuinfo import GPUInfo

# args, AverageMeter, text_only and is_joint come from the surrounding training script
def compute_features(dataloader, model, N):
    if args.verbose:
        print('Compute features')
    batch_time = AverageMeter()
    end = time.time()
    model.eval()
    print("Before", GPUInfo.gpu_usage())

    # discard the label information in the dataloader
    try:
        for i, (input_tensor, _, text) in enumerate(dataloader):
            # note: a bare torch.no_grad() call is a no-op here; the feature
            # extraction below should run inside `with torch.no_grad():`

            try:
                if text_only:
                    #text = torch.autograd.Variable(text)#.cuda())  # , volatile=True)
                    #print(text[0].split()[:512].size)

                    aux = model.extract_features(
                        text.cuda()).data.cpu().numpy()
                    aux = aux.astype('float32')

                elif is_joint:
                    input_var = torch.autograd.Variable(
                        input_tensor.cuda())  # , volatile=True)
                    text = text.cuda()
                    aux = model.module.extract_features(
                        input_var, text).data.cpu().numpy()
                    aux = aux.astype('float32')

                else:
                    input_var = torch.autograd.Variable(
                        input_tensor.cuda())  # , volatile=True)
                    aux = model.module.extract_features(
                        input_var).data.cpu().numpy()
                    aux = aux.astype('float32')
                if i == 0:
                    features = np.zeros((N, aux.shape[1]), dtype='float32')

                if i < len(dataloader) - 1:  # fixed off-by-one: enumerate never reaches len(dataloader)
                    features[i * args.batch:(i + 1) * args.batch] = aux
                else:
                    # special treatment for the final (possibly smaller) batch
                    features[i * args.batch:] = aux

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if args.verbose and (i % 50) == 0:
                    print('{0} / {1}\t'
                          'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'.
                          format(i, len(dataloader), batch_time=batch_time))
            except Exception as e:
                print("RAM Usage: ", str(psutil.virtual_memory().percent))
                print(GPUInfo.gpu_usage())

                print("failed: ", e)
    except RuntimeError:
        print("RAM Usage: ", str(psutil.virtual_memory().percent))
        print(GPUInfo.gpu_usage())

        return features
    except Exception as e:
        print("Error {}".format(e))
    finally:
        # note: returning from finally also swallows any exception raised above;
        # features must have been created in the first iteration for this to work
        return features
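A minimal sketch of the no_grad fix flagged in the comments of Example #22 (it assumes the same model and dataloader objects):

import torch

with torch.no_grad():  # disable autograd for the whole forward pass
    for i, (input_tensor, _, text) in enumerate(dataloader):
        aux = model.module.extract_features(
            input_tensor.cuda()).cpu().numpy().astype('float32')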