Code example #1
 def test_C01T01_list_plugins(self):
     """ Asses that plugin list has more than zero installed plugins """
     self.conf.list_plugins = True
     self.fe = FeatureExtractor(self.conf)
     """ FeatureExtractor instance """
     # assertion
     assert (len(self.fe.discovered_core_plugins) > 0)
Code example #2
def init_fn(network='inception_resnet_v2',
            checkpoint='./checkpoints/inception_resnet_v2.ckpt',
            layer_names='PreLogitsFlatten',
            preproc_func='inception',
            preproc_threads=2,
            batch_size=64,
            num_classes=1001):
    '''
        Args:
            network: name of the network architecture (e.g. 'inception_resnet_v2')
            checkpoint: path to the pre-trained checkpoint file
            layer_names: comma-separated layer names to extract features from
            preproc_func: name of the image preprocessing function
            preproc_threads: number of image preprocessing threads
            batch_size: batch size used during extraction
            num_classes: number of classes in the network's classifier
    '''
    layer_names = layer_names.split(',')

    # Initialize the feature extractor
    feature_extractor = FeatureExtractor(network_name=network,
                                         checkpoint_path=checkpoint,
                                         batch_size=batch_size,
                                         num_classes=num_classes,
                                         preproc_func_name=preproc_func,
                                         preproc_threads=preproc_threads)

    return feature_extractor
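A hedged usage sketch for init_fn: it only builds the extractor, so the layer names still have to be handed to a helper such as feature_extraction_queue, whose signature is taken from the queue-based examples further down this page; the image directory is a placeholder.

# Sketch only: init_fn comes from the example above; feature_extraction_queue and
# close() appear in later examples on this page; './images' is a placeholder path.
feature_extractor = init_fn(network='inception_resnet_v2',
                            checkpoint='./checkpoints/inception_resnet_v2.ckpt')
features = feature_extraction_queue(feature_extractor, './images',
                                    ['PreLogitsFlatten'], 64, 1001)
feature_extractor.close()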
Code example #3
File: app.py  Project: yoojong0820/data-models
def main():
    """
    This assumes that the offline model pipeline has already been run,
    so the feature extractor & model can be loaded from AWS S3
    """

    app = Flask(__name__)

    # build AWS S3 Client & point to the bucket
    s3 = build_s3()
    bucket_name = 'team00-test-bucket'

    # point to the location storing feature extractor & offline trained model
    column_transformer = FeatureExtractor.load_from_s3(
        s3, bucket_name, 'titanic/column_transformer.pkl')
    model = LogisticRegressionModel.load_from_s3(s3, bucket_name,
                                                 'titanic/model.pkl')

    @app.route('/predict', methods=['POST'])
    def predict():
        data = request.get_json(force=True)
        mapped = {k: [v] for k, v in data.items()}
        df = pd.DataFrame.from_dict(mapped)
        transformed = column_transformer.transform(df)
        prediction = model.predict_proba(transformed)
        return jsonify(label_zero_estimate=prediction[0][0],
                       label_one_estimate=prediction[0][1])

    app.run(host='0.0.0.0', port=5000, debug=True)
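Once main() is running, the /predict route accepts a flat JSON object and returns both class probabilities. A hedged client sketch follows; the feature names in the payload are hypothetical Titanic-style columns, since the columns expected by the column_transformer are not shown here.

import requests

# Hypothetical payload: replace the keys with the columns the transformer was fitted on.
payload = {"Pclass": 3, "Sex": "male", "Age": 22.0, "Fare": 7.25}
resp = requests.post("http://localhost:5000/predict", json=payload)
print(resp.json())  # {"label_zero_estimate": ..., "label_one_estimate": ...}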
Code example #4
 def test_C03T01_core(self):
     """ Loads plugin from FeatureExtractor using parameters from setup_method() and Asses that output file has 1 column and num_ticks - forward_ticks """
     self.fe = FeatureExtractor(self.conf)
     # get the number of rows and cols from out_file
     rows_o, cols_o = self.get_size_csv(self.conf.output_file)
     # assertion
     assert (cols_o == self.fe.ep_core.cols_d * self.conf.num_components)
Code example #5
def create_app(test_config=None):
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__, instance_relative_config=True)
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    # read plugin configuration JSON file
    p_config = read_plugin_config()
    # initialize FeatureExtractor
    fe = FeatureExtractor(p_config)
    # set flask app parameters
    app.config.from_mapping(
        # a default secret that should be overridden by instance config
        SECRET_KEY="dev",
        # store the database in the instance folder
        DATABASE=os.path.join(BASE_DIR, "test.sqlite"),
        # plugin configuration from visualizer.json
        P_CONFIG=p_config,
        # feature_extractor instance with plugins already loaded
        FE=fe
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile("config.py", silent=True)
    else:
        # load the test config if passed in
        app.config.update(test_config)

    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    #@app.route("/hello")
    #def hello():
    #    return "Hello, World!"

    # register the database commands
    from feature_extractor.visualizer import db

    db.init_app(app)

    # apply the blueprints to the app
    from feature_extractor.visualizer import auth, visualizer
    
    # get the output plugin template folder
    plugin_folder = fe.ep_output.template_path(p_config)
    # construct the blueprint 
    vis_bp = visualizer.visualizer_blueprint(plugin_folder)
    
    # register the blueprints
    app.register_blueprint(auth.bp)
    app.register_blueprint(vis_bp) 

    # make url_for('index') == url_for('blog.index')
    # in another app, you might define a separate main index here with
    # app.route, while giving the blog blueprint a url_prefix, but for
    # the tutorial the blog will be the main index
    app.add_url_rule("/", endpoint="index")

    return app
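A minimal sketch of exercising this application factory from a test, assuming create_app is importable from the visualizer package referenced inside it; TESTING and SECRET_KEY are standard Flask config keys.

# Sketch only: the import path is an assumption based on the package names above.
from feature_extractor.visualizer import create_app

app = create_app(test_config={"TESTING": True, "SECRET_KEY": "test"})
with app.test_client() as client:
    response = client.get("/")
    print(response.status_code)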
Code example #6
 def test_C01T02_plugin_load(self):
     """ Loads HeuristicTS using parameters from setup_method() and Asses that output file has 1 column and num_ticks - forward_ticks """
     self.fe = FeatureExtractor(self.conf)
     # get the number of rows and cols from out_file
     rows_o, cols_o = self.get_size_csv(self.conf.output_file)
     # assertion
     assert (cols_o == 1) and (rows_o == self.fe.ep_core.rows_d -
                               self.fe.ep_core.conf.forward_ticks)
Code example #7
def start(path):
    count = [0 for _ in range(num)]
    label_count = [0 for _ in range(num)]
    per_accuracy = [0 for _ in range(num)]
    total = 0
    with open(os.path.join(path, label_name)) as label_file:
        labels = label_file.readlines()

    labels = random.sample(labels, 1000)

    feature_extractor = FeatureExtractor(network_name='flownet_si',
                                         checkpoint_path=ck_path,
                                         batch_size=1,
                                         num_classes=num,
                                         preproc_func_name='flownet_si')
    feature_extractor.print_network_summary()

    for label in tqdm(labels):
        file_name = label.strip().split()[0]
        label_num = label.strip().split()[1]
        label_num = int(label_num)
        label_count[label_num] += 1
        image1 = os.path.join(path, parts[0], file_name)
        image2 = os.path.join(path, parts[1], file_name)
        clip_class = classification_placeholder_input(feature_extractor,
                                                      image1, image2, 'Logits',
                                                      1, 31)
        if clip_class == label_num:
            count[clip_class] += 1
            total += 1
    for i in range(len(label_count)):
        if label_count[i]:
            per_accuracy[i] = count[i] / label_count[i]
    print(label_count)
    print(count)
    print(per_accuracy)
    print(total)
    print(total / len(labels))
Code example #8
                        help="batch size (32)")
    parser.add_argument("--num_classes",
                        dest="num_classes",
                        type=int,
                        default=1001,
                        help="number of classes (1001)")
    args = parser.parse_args()

    # resnet_v2_101/logits,resnet_v2_101/pool4 => to list of layer names
    layer_names = args.layer_names.split(",")

    # Initialize the feature extractor
    feature_extractor = FeatureExtractor(
        network_name=args.network_name,
        checkpoint_path=args.checkpoint,
        batch_size=args.batch_size,
        num_classes=args.num_classes,
        preproc_func_name=args.preproc_func,
        preproc_threads=args.num_preproc_threads)

    # Print the network summary, use these layer names for feature extraction
    #feature_extractor.print_network_summary()

    # Feature extraction example using a filename queue to feed images
    feature_dataset = feature_extraction_queue(feature_extractor,
                                               args.image_path, layer_names,
                                               args.batch_size,
                                               args.num_classes)

Code example #9
    parser.add_argument("--batch_size",
                        dest="batch_size",
                        type=int,
                        default=32,
                        help="batch size (32)")
    parser.add_argument("--num_classes",
                        dest="num_classes",
                        type=int,
                        default=1001,
                        help="number of classes (1001)")
    args = parser.parse_args()

    # Initialize the feature extractor
    feature_extractor = FeatureExtractor(network_name=args.network_name,
                                         checkpoint_path=args.checkpoint,
                                         batch_size=args.batch_size,
                                         num_classes=args.num_classes,
                                         preproc_func_name=args.preproc_func)

    # Print the network summary, use these layer names for feature extraction
    feature_extractor.print_network_summary()

    # OPTION 1. Test image classification using a filename queue to feed images
    classification_queue_input(feature_extractor, args.image_path,
                               args.logits_name, args.batch_size,
                               args.num_classes)

    # OPTION 2. Test image classification by manually feeding images into placeholders
    classification_placeholder_input(feature_extractor, args.image_path,
                                     args.logits_name, args.batch_size,
                                     args.num_classes)
Code example #10
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="TensorFlow feature extraction")
    parser.add_argument("--network", dest="network_name", type=str, required=True, help="model name, e.g. 'resnet_v2_101'")
    parser.add_argument("--checkpoint", dest="checkpoint", type=str, required=True, help="path to pre-trained checkpoint file")
    parser.add_argument("--image_path", dest="image_path", type=str, required=True, help="path to directory containing images")
    parser.add_argument("--logits_name", dest="logits_name", type=str, required=True, help="name of logits layer in network")
    parser.add_argument("--preproc_func", dest="preproc_func", type=str, default=None, help="force the image preprocessing function (None)")
    parser.add_argument("--batch_size", dest="batch_size", type=int, default=32, help="batch size (32)")
    parser.add_argument("--num_classes", dest="num_classes", type=int, default=1001, help="number of classes (1001)")
    args = parser.parse_args()

    # Initialize the feature extractor
    feature_extractor = FeatureExtractor(
        network_name=args.network_name,
        checkpoint_path=args.checkpoint,
        batch_size=args.batch_size,
        num_classes=args.num_classes,
        preproc_func_name=args.preproc_func)

    # Print the network summary, use these layer names for feature extraction
    feature_extractor.print_network_summary()

    # OPTION 1. Test image classification using a filename queue to feed images
    classification_queue_input(
        feature_extractor, args.image_path, args.logits_name,
        args.batch_size, args.num_classes)

    # OPTION 2. Test image classification by manually feeding images into placeholders
    classification_placeholder_input(
        feature_extractor, args.image_path, args.logits_name,
        args.batch_size, args.num_classes)
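For quick experiments the same parser can also be driven programmatically; a hedged sketch, with placeholder checkpoint and image paths.

    # Sketch only: passes the flags defined above straight to parse_args().
    args = parser.parse_args([
        "--network", "resnet_v2_101",
        "--checkpoint", "./checkpoints/resnet_v2_101.ckpt",
        "--image_path", "./images",
        "--logits_name", "resnet_v2_101/logits",
    ])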
Code example #11
def main(path):
    count = [0 for _ in range(nums)]
    label_count = [0 for _ in range(nums)]
    per_accuracy = [0 for _ in range(nums)]
    TP = [0 for _ in range(nums)]
    TN = [0 for _ in range(nums)]
    FP = [0 for _ in range(nums)]
    FN = [0 for _ in range(nums)]
    recall = [0 for _ in range(nums)]
    precision = [0 for _ in range(nums)]
    f1 = [0 for _ in range(nums)]
    accuracy = [0 for _ in range(nums)]
    total = 0
    confusion = np.zeros((nums, nums), dtype=int)
    TN_matrix = np.zeros((nums, nums), dtype=int)
    with open(os.path.join(path, label_name)) as label_file:
        labels = label_file.readlines()

    transitions = 0
    if output_mode == 2:
        reader = pywrap_tensorflow.NewCheckpointReader(ck_path)
        var_to_shape_map = reader.get_variable_to_shape_map()
        transitions = reader.get_tensor('transitions')

    #labels = random.sample(labels, 1000)

    feature_extractor = FeatureExtractor(network_name=net_name,
                                         input_mode=input_mode,
                                         output_mode=output_mode,
                                         checkpoint_path=ck_path,
                                         batch_size=batch_size,
                                         num_classes=nums,
                                         preproc_func_name=net_name)
    feature_extractor.print_network_summary()

    for i in tqdm(range(int(len(labels) / batch_size))):
        image1 = []
        image2 = []
        label_num = []
        for j in range(batch_size):
            file_name = labels[i * batch_size + j].strip().split()[0]
            label_num.append(int(labels[i * batch_size +
                                        j].strip().split()[1]))
            label_count[label_num[j]] += 1
            image1.append(os.path.join(path, parts[0], file_name))
            image2.append(os.path.join(path, parts[1], file_name))
        clip_class = classification_placeholder_input(feature_extractor,
                                                      transitions, image1,
                                                      image2,
                                                      net_name + '/fc8',
                                                      batch_size, 19)
        for j in range(batch_size):
            confusion[label_num[j]][clip_class[j]] += 1
            if clip_class[j] == label_num[j]:
                count[clip_class[j]] += 1
                total += 1
    for i in range(len(label_count)):
        if label_count[i]:
            per_accuracy[i] = count[i] / label_count[i]
    for i in range(nums):
        TP[i] = confusion[i][i]
        FP[i] = np.sum(confusion[:, i]) - TP[i]
        FN[i] = np.sum(confusion[i]) - TP[i]
        TN[i] = len(labels) - TP[i] - FP[i] - FN[i]
        recall[i] = TP[i] / (TP[i] + FN[i])
        precision[i] = TP[i] / (TP[i] + FP[i])
        f1[i] = 2 * precision[i] * recall[i] / (precision[i] + recall[i])
        accuracy[i] = (TP[i] + TN[i]) / (TP[i] + TN[i] + FP[i] + FN[i])
    print(label_count)
    print(count)
    print(per_accuracy)
    print(total)
    print(total / len(labels))
    print(confusion)
    print('TP:', TP)
    print('FP:', FP)
    print('FN:', FN)
    print('TN:', TN)
    print('precision:', precision)
    print('recall:', recall)
    print('f1:', f1)
    print('accuracy:', accuracy)
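The per-class metric arithmetic at the end of this example can be condensed into a small helper; this is a sketch under the assumption that rows of the confusion matrix index true labels and columns index predictions, as in the loop above, with guards added against empty classes.

import numpy as np

def per_class_metrics(confusion):
    """Per-class precision/recall/F1/accuracy from a square confusion matrix
    (rows = true labels, columns = predictions). Sketch with zero-division guards."""
    confusion = np.asarray(confusion, dtype=float)
    total = confusion.sum()
    tp = np.diag(confusion)
    fp = confusion.sum(axis=0) - tp
    fn = confusion.sum(axis=1) - tp
    tn = total - tp - fp - fn
    precision = np.divide(tp, tp + fp, out=np.zeros_like(tp), where=(tp + fp) > 0)
    recall = np.divide(tp, tp + fn, out=np.zeros_like(tp), where=(tp + fn) > 0)
    denom = precision + recall
    f1 = np.divide(2 * precision * recall, denom, out=np.zeros_like(tp), where=denom > 0)
    accuracy = (tp + tn) / total
    return precision, recall, f1, accuracy

# e.g. precision, recall, f1, accuracy = per_class_metrics(confusion)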
Code example #12
def start(video_name):
  box_o = [0,0,0,0]
  cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
  cap = cv2.VideoCapture(video_name)
  width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
  height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
  frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
  cv2.createTrackbar('time', 'frame', 0, frames, nothing)
  loop_flag = 0
  pos = 0
  label_name = []

  with open(label_path) as label_file:
    labels = label_file.readlines()
  for label in labels:
    label_name.append(label.strip().split(':')[1])
  print(label_name)

  feature_extractor = FeatureExtractor(
    network_name='flownet_s',
    checkpoint_path='/home/cheer/video_test/corre/flownet_s/model.ckpt-12000',
    batch_size=1,
    num_classes=30,
    preproc_func_name='flownet_s')
  feature_extractor.print_network_summary()

  if cap.isOpened():
    ret, frameA = cap.read()
  while(cap.isOpened()):
    if loop_flag == pos:
      loop_flag = loop_flag + 1
      cv2.setTrackbarPos('time', 'frame', loop_flag)
    else:
      pos = cv2.getTrackbarPos('time', 'frame')
      loop_flag = pos
      cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
    ret, frameB = cap.read()
    diff, thresh, cnts, score = compare_frame(frameA, frameB)
    frameC = frameB.copy()
    frameD = frameB.copy()
    boxes = convert_box(cnts)
    boxes_nms = non_max_suppression(boxes, 0.3)
    box_max = find_max(boxes_nms)

    if len(box_max):
      box_size = box_max[4]
      box_center = box_max[5:7]
      overlap = find_overlap(box_max[0:4], box_o)
      cv2.rectangle(frameC, (box_max[0], box_max[1]), (box_max[2], box_max[3]), (0, 0, 255), 2)
      cv2.rectangle(frameC, (overlap[0], overlap[1]), (overlap[2], overlap[3]), (0, 255, 0), 2)
      cv2.imwrite('/home/cheer/video_test/corre/data/result/t0/' + '{:05}'.format(pos) + '.jpg', frameA[overlap[1]:overlap[3], overlap[0]:overlap[2]])
      cv2.imwrite('/home/cheer/video_test/corre/data/result/t1/' + '{:05}'.format(pos) + '.jpg', frameD[overlap[1]:overlap[3], overlap[0]:overlap[2]])
      image1 = '/home/cheer/video_test/corre/data/result/t0/' + '{:05}'.format(pos) + '.jpg'
      image2 = '/home/cheer/video_test/corre/data/result/t1/' + '{:05}'.format(pos) + '.jpg'
      clip_class = classification_placeholder_input(feature_extractor, image1, image2, 'Logits',1, 6)
      cv2.putText(frameC, label_name[clip_class] , (25,232), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,255), 3)
      cv2.imwrite('/home/cheer/video_test/corre/data/result/t2/' + '{:05}'.format(pos) + '.jpg', frameC)
      box_o = box_max[0:4]
    cv2.imshow("frame", frameC)
    frameA = frameB.copy()
    if cv2.waitKey(1) & 0xFF == ord('q'):
      break

  cap.release()
  cv2.destroyAllWindows()
Code example #13
File: evaluation.py  Project: XiongweiWu/ActionNet
def main(path):
    count = [0 for _ in range(nums)]
    label_count = [0 for _ in range(nums)]
    per_accuracy = [0 for _ in range(nums)]
    TP = [0 for _ in range(nums)]
    TN = [0 for _ in range(nums)]
    FP = [0 for _ in range(nums)]
    FN = [0 for _ in range(nums)]
    recall = [0 for _ in range(nums)]
    precision = [0 for _ in range(nums)]
    f1 = [0 for _ in range(nums)]
    accuracy = [0 for _ in range(nums)]
    total = 0
    confusion = np.zeros((nums, nums), dtype=int)
    TN_matrix = np.zeros((nums, nums), dtype=int)
    with open(os.path.join(path, label_name)) as label_file:
        labels = label_file.readlines()

    labels = random.sample(labels, 1000)

    feature_extractor = FeatureExtractor(network_name='action_vgg_s',
                                         checkpoint_path=ck_path,
                                         batch_size=1,
                                         num_classes=num,
                                         preproc_func_name='action_vgg_s')
    feature_extractor.print_network_summary()

    for label in tqdm(labels):
        file_name = label.strip().split()[0]
        label_num = label.strip().split()[1]
        label_num = int(label_num)
        #label_num = convert_label(label_num)
        label_count[label_num] += 1
        image1 = os.path.join(path, parts[0], file_name)
        image2 = os.path.join(path, parts[1], file_name)
        clip_class = classification_placeholder_input(feature_extractor,
                                                      image1, image2,
                                                      'action_vgg_s/fc8', 1,
                                                      19)
        #clip_class = convert_label(clip_class)
        confusion[label_num][clip_class] += 1
        if clip_class == label_num:
            count[clip_class] += 1
            total += 1
    for i in range(len(label_count)):
        if label_count[i]:
            per_accuracy[i] = count[i] / label_count[i]
    for i in range(nums):
        TP[i] = confusion[i][i]
        FP[i] = np.sum(confusion[:, i]) - TP[i]
        FN[i] = np.sum(confusion[i]) - TP[i]
        TN[i] = len(labels) - TP[i] - FP[i] - FN[i]
        recall[i] = TP[i] / (TP[i] + FN[i])
        precision[i] = TP[i] / (TP[i] + FP[i])
        f1[i] = 2 * precision[i] * recall[i] / (precision[i] + recall[i])
        accuracy[i] = (TP[i] + TN[i]) / (TP[i] + TN[i] + FP[i] + FN[i])
    print(label_count)
    print(count)
    print(per_accuracy)
    print(total)
    print(total / len(labels))
    print(confusion)
    print('TP:', TP)
    print('FP:', FP)
    print('FN:', FN)
    print('TN:', TN)
    print('precision:', precision)
    print('recall:', recall)
    print('f1:', f1)
    print('accuracy:', accuracy)
Code example #14
File: main.py  Project: lan2720/jail
def run_video(file_path=None):
    shutil.rmtree('tmp/')
    os.mkdir('tmp/')
    batch_size = 1
    with h5py.File('/data1/Project/TF_FeatureExtraction/features.h5','r') as f:
        criterion = f['resnet_v2_101']['logits'].value.squeeze(axis=1).squeeze(axis=1) #[N,d]
    feature_extractor = FeatureExtractor(
        network_name='resnet_v2_101',
        checkpoint_path='/data1/Project/TF_FeatureExtraction/checkpoints/resnet_v2_101.ckpt',
        batch_size=batch_size,
        num_classes=1001,
        preproc_func_name='inception',
        preproc_threads=2
    )
    FLAG = False
    keypoints_track = []
    if not file_path:
        file_path = 0
    cap = cv2.VideoCapture(file_path)
    out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 15, (int(cap.get(3)), int(cap.get(4))))
    while 1:
        t = cv2.getTickCount()
        # Read new image
        ret, frame = cap.read()
        #cv2.imwrite('screenshot.jpg', frame)
        if not ret:
            break
        # Output keypoints and the image with the human skeleton blended on it
        keypoints, output_image = openpose.forward(frame, True)
        #keypoints_track.append(keypoints)
        for i in range(keypoints.shape[0]):
            #boxes = body_parts_box(keypoints[i,:,:])
            box = upper_body_box(keypoints[i,:,:])
            if box is None:
                continue
            if box[0][0] <=0 or box[0][1] <= 0 or box[1][0] <= 0 or box[1][1] <=0:
                print(keypoints[i,:,:])
                FLAG = True
            cv2.imwrite('tmp/p%d.jpg'%i, frame[box[0][1]:box[1][1]+1, box[0][0]:box[1][0]+1, :])
            feature_data = feature_extraction_queue(feature_extractor, 'tmp/p%d.jpg'%i, 
                            ['resnet_v2_101/logits'], batch_size, num_classes=1001)
            feature = feature_data['resnet_v2_101/logits'].squeeze(axis=1).squeeze(axis=1)
            score = check_police(feature, criterion)
            if score >= 0.5: # is police
                color = (255, 0, 0)
            else:
                color = (0, 0, 255)
            shutil.rmtree('tmp/')
            os.mkdir('tmp/')
            cv2.rectangle(output_image, box[0], box[1], color, 2)
            #if score >= 0.5:
            #    cv2.imwrite('police_uniform/%s.jpg' % str(time.time()).replace('.', ''), frame[box[0][1]:box[1][1]+1, box[0][0]:box[1][0]+1, :])
            cv2.putText(output_image, "%.3f"%score, box[0], cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255,255,255))
            #cv2.drawContours(output_image, [box], 0, (0,0,255), 2)
            #for bb in boxes:
            #    if not bb:
            #        continue
            #    cv2.rectangle(output_image, bb[0], bb[1], (0, 0, 255), 2)
        #for i in range(keypoints.shape[0]):
        #    detect_action(keypoins[i,:,:])
        out.write(output_image)
        # Compute FPS
        t = (cv2.getTickCount() - t) / cv2.getTickFrequency()
        fps = 1.0 / t
        cv2.putText(output_image, "%.1f FPS"%fps, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255))
        # Display the image
        cv2.imshow("output", output_image)
        if FLAG:
            key = cv2.waitKey(0)
            if key == ord(' '):
                FLAG = False
                continue
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    feature_extractor.close()
    cap.release()
    out.release()
    cv2.destroyAllWindows()
Code example #15
 def process(self, bucket_name):
     feature_extractor = FeatureExtractor(self.x_training, self.test)
     train, test = feature_extractor.transform()
     feature_extractor.save_to_s3(bucket_name, 'titanic/column_transformer.pkl')
     return train, self.training[self.label], test
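This process() step is the offline counterpart of code example #3: it persists the fitted transformer under 'titanic/column_transformer.pkl', which the Flask app later restores. A short hedged sketch of the reload side, assuming the same build_s3 helper and bucket as that example.

# Sketch only: reload the transformer persisted by process() above.
s3 = build_s3()
column_transformer = FeatureExtractor.load_from_s3(
    s3, 'team00-test-bucket', 'titanic/column_transformer.pkl')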