def main(args):
    """Enqueue every image under ``args.image_folder`` for classification.

    Recursively collects files ending in ``args.image_ext`` or ``.png``,
    submits one request per image to the ``ALI_CLASSIFICATION_TEST_INPUT_0``
    queue, and prints any reply already available on the output queue.

    Args:
        args: parsed CLI namespace providing ``image_folder``, ``output_dir``
            and ``image_ext`` attributes.
    """
    folder = args.image_folder
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # Collect candidate image paths recursively, in deterministic order.
    im_files = []
    for dp, dn, files in os.walk(folder):
        for f in files:
            if f.endswith((args.image_ext, '.png')):
                im_files.append(os.path.join(dp, f))
    im_files.sort()
    queue = 'TEST_ALI_20190516'  # loop-invariant reply-queue name
    for im_file in tqdm(im_files):
        # NOTE(review): the original also computed an output path via
        # str.replace and decoded each image with cv2.imread, but used
        # neither value; that dead work (a full image decode per file)
        # has been removed.
        data = {
            'image_name': im_file,
            'image_file': to_image_key(im_file),
            'output_queue': queue,
            'from_detectron': True,
            'others_threshold': 0.7,
            'save_root': args.output_dir,
            'save_folder': 'class_images',
        }
        enqueue('ALI_CLASSIFICATION_TEST_INPUT_0', data)
        data = dequeue(queue)
        if data:
            print('\nDequeued from {}: {}'.format(queue, data))
    print('Inference done.')
from __future__ import print_function import sys from message_queue import enqueue q_name = sys.argv[1] data = eval(sys.argv[2]) enqueue(q_name, data) print('Enqueued to {}: {}'.format(q_name, data))
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function import os import sys import time import numpy as np sys.path.insert(0, '.') import image_store as image_store from message_queue import clear, dequeue, enqueue image_file = 'tests/images/budweiser.jpg' output_queue = 'CIGARETTE_TEST_{:.6f}'.format(np.random.random()) enqueue('CIGARETTE_INPUT', { 'image_file': image_store.to_image_key(image_file, 'BUD'), 'output_queue': output_queue, 'context': os.path.basename(image_file), 'enqueue_at': time.time() }) result = dequeue(output_queue) clear(output_queue) print(result)
def main():
    """Run the CIGARETTE classification server loop.

    Loads a frozen TF1 graph, then repeatedly dequeues jobs from
    ``CIGARETTE_INPUT``; each job supplies three image keys that are fed to
    the model together, and a single (type, score) result is enqueued on the
    job's ``output_queue``.
    """
    args = parse_args()
    # Pin this process to the requested GPU before TF initialises.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    # label_map: model output index -> class name (one name per line in file).
    with open(args.classes) as f:
        classes = [c.strip() for c in f.readlines()]
    label_map = {i: v for i, v in enumerate(classes)}
    # class2id: class name -> external id used in the reply payload.
    with open(args.classes_mapping) as f:
        cm = [c.strip() for c in f.readlines()]
    class2id = {v: i for i, v in enumerate(cm)}
    # Load the frozen inference graph from disk.
    cigarette_graph = tf.Graph()
    with cigarette_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(args.model_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    config = tf.ConfigProto()
    # Cap GPU memory so other servers can share the device.
    config.gpu_options.per_process_gpu_memory_fraction = args.gpu_fraction
    with cigarette_graph.as_default():
        sess = tf.Session(graph=cigarette_graph, config=config)
        # The model takes three image inputs per request — presumably three
        # views/crops of the same product; TODO confirm against the caller.
        image_tensor1 = cigarette_graph.get_tensor_by_name('input1:0')
        image_tensor2 = cigarette_graph.get_tensor_by_name('input2:0')
        image_tensor3 = cigarette_graph.get_tensor_by_name('input3:0')
        is_training = cigarette_graph.get_tensor_by_name('is_training:0')
        prob = cigarette_graph.get_tensor_by_name('prob:0')
        soft = tf.nn.softmax(prob)
        prediction = tf.argmax(soft, axis=1)
        # [0]: single-item batch, so take the one max-probability scalar.
        score = tf.reduce_max(soft, reduction_indices=[1])[0]
    logger.info('CIGARETTE Server started!')
    try:
        while True:
            # `runnable` is a module-level flag — presumably cleared by a
            # signal handler for graceful shutdown; verify where it is set.
            if not runnable:
                logger.info('EXIT CIGARETTE SERVER!')
                break
            data = None
            # Non-blocking dequeue; poll once per second when idle.
            data = dequeue('CIGARETTE_INPUT', False)
            if data:
                logger.info('Dequeued from CIGARETTE_INPUT: {}'.format(data))
            else:
                time.sleep(1)
                continue
            # Validate the job payload shape before touching its fields.
            if not isinstance(data, dict):
                logger.error('Dequeued data format is not correct.')
                continue
            image_oris = data.get('images', None)
            im_keys = data.get('image_files', None)
            context = data.get('context', None)
            output_queue = data.get('output_queue', None)
            if im_keys is None:
                logger.error('im_keys is None.')
                continue
            if not isinstance(im_keys, list):
                logger.error('im_keys is not a list.')
                continue
            if output_queue is None:
                logger.error('output queue is None.')
                continue
            # NOTE(review): im_keys is assumed to hold at least 3 entries;
            # a shorter list raises IndexError here (caught by the outer
            # handler, which kills the loop) — TODO confirm contract.
            logger.info('Processing {}, {} and {}...'.format(
                im_keys[0], im_keys[1], im_keys[2]))
            # Fetch the three images to local temp files; on any failure
            # fall through with None so a sentinel result is enqueued.
            try:
                _im_file1 = image_store.get_as_file(im_keys[0], True)
                _im_file2 = image_store.get_as_file(im_keys[1], True)
                _im_file3 = image_store.get_as_file(im_keys[2], True)
            except Exception:
                logger.error(
                    'Get file from im_key failed: {}, {} or {}.'.format(
                        im_keys[0], im_keys[1], im_keys[2]))
                _im_file1 = _im_file2 = _im_file3 = None
            # Decode/preprocess; same None-sentinel strategy on failure.
            try:
                image1 = process_image(_im_file1)
                image2 = process_image(_im_file2)
                image3 = process_image(_im_file3)
            except Exception:
                logger.error('Read image failed: {}, {} or {}.'.format(
                    _im_file1, _im_file2, _im_file3))
                image1 = image2 = image3 = None
            if image1 is None or image2 is None or image3 is None:
                # Reply with the -1/-1 sentinel so the caller is never left
                # waiting on its output queue.
                result = {
                    'image': image_oris,
                    'context': context,
                    'type': str(-1),
                    'score': str(-1)
                }
                result['enqueue_at'] = time.time()
                logger.info('Enqueue to {}'.format(output_queue))
                enqueue(output_queue, result)
            else:
                # Add the batch dimension expected by the placeholders.
                image1 = np.expand_dims(image1, axis=0)
                image2 = np.expand_dims(image2, axis=0)
                image3 = np.expand_dims(image3, axis=0)
                pred, sc = sess.run(
                    [prediction, score],
                    feed_dict={
                        image_tensor1: image1,
                        image_tensor2: image2,
                        image_tensor3: image3,
                        is_training: False
                    })
                print(pred)
                print(sc)
                # Map model index -> class name -> external id; a name
                # missing from class2id falls back to the -1 sentinel.
                try:
                    result = {
                        'image': image_oris,
                        'context': context,
                        'type': str(class2id[label_map[int(pred)]]),
                        'score': float(round(sc, 3))
                    }
                except Exception:
                    print(label_map[int(pred)])
                    result = {
                        'image': image_oris,
                        'context': context,
                        'type': str(-1),
                        'score': str(-1)
                    }
                result['enqueue_at'] = time.time()
                print('Enqueue to {}'.format(output_queue))
                enqueue(output_queue, result)
                # Archive low-confidence inputs for later inspection,
                # bucketed by date and predicted class.
                if SAVE and sc <= SAVE_THRESHOLD:
                    folder = os.path.join(
                        SAVE_PATH,
                        datetime.datetime.now().strftime('%Y%m%d'),
                        label_map[int(pred)])
                    if not os.path.exists(folder):
                        os.makedirs(folder)
                    shutil.copy(
                        _im_file1,
                        os.path.join(
                            folder,
                            '{:.3f}_'.format(sc) +
                            os.path.basename(_im_file1)))
                    shutil.copy(
                        _im_file2,
                        os.path.join(
                            folder,
                            '{:.3f}_'.format(sc) +
                            os.path.basename(_im_file2)))
                    shutil.copy(
                        _im_file3,
                        os.path.join(
                            folder,
                            '{:.3f}_'.format(sc) +
                            os.path.basename(_im_file3)))
            # Remove the per-request temp files whether or not inference
            # succeeded.
            if _im_file1 is not None and os.path.exists(_im_file1):
                os.remove(_im_file1)
            if _im_file2 is not None and os.path.exists(_im_file2):
                os.remove(_im_file2)
            if _im_file3 is not None and os.path.exists(_im_file3):
                os.remove(_im_file3)
    except Exception as e:
        # Any error escaping the loop body ends the server; log with
        # traceback for diagnosis.
        logger.error(e, exc_info=True)
    except:
        # Bare except: catches BaseException (e.g. KeyboardInterrupt) so the
        # process never dies with an unlogged error.
        logger.error('Unknown error occurred!')