Example #1
0
def main(nw_src, gt_src, node_label_src, num_find, nclus, verbose=True, unique_seeds=False):
    """Train Bespoke on ground-truth communities and extract new communities.

    Args:
        nw_src: path to the network/graph file (loaded via common.load_SimpleNW_graph).
        gt_src: path to the ground-truth training communities file.
        node_label_src: path to the node-label file.
        num_find: number of communities to extract.
        nclus: number of training patterns (clusters) to use.
        verbose: if True, print progress to stdout.
        unique_seeds: forwarded to get_comms to control seed reuse.

    Returns:
        Tuple (comms, KM_obj, tot_time, train_time) on success, or None when
        the inputs fail validation (too few training communities, or a
        mismatch between graph nodes and labeled nodes).
    """
    start = time.time()
    gts = common.load_comms(gt_src)
    # Bespoke needs training communities of size > 3; bail out early otherwise.
    if not gts:
        print("Error! No training communities of size >3 found. Cannot run Bespoke.")
        return None
    if len(gts) < nclus:
        print("Error! Too few (<#patterns,", nclus, ") training communities of size >3 found. Cannot run Bespoke.")
        return None
    node_labels = common.load_labels(node_label_src)
    nw = common.load_SimpleNW_graph(nw_src)

    # Every graph node must carry a label, or training is ill-defined.
    if len(nw.nodes()) != len(node_labels):
        print("Error! Number of labeled nodes does not match number of nodes in the graph. #NW nodes=", len(nw.nodes()), "#labeled nodes=", len(node_labels))
        return None
    if verbose:
        print("Training...")
        sys.stdout.flush()
    ret_list = train.train(nw, gts, node_labels, nclus)

    if verbose:
        print("Training complete...\nBeginning extraction...")
        sys.stdout.flush()
    KM_obj, size_dist_per_group, seed_info_per_group = ret_list
    end_train = time.time()
    comms = get_comms(nw, num_find, seed_info_per_group, size_dist_per_group, KM_obj, node_labels, unique_seeds)
    end = time.time()
    # Round to hundredths of a second for reporting.
    tot_time, train_time = round(end - start, 2), round(end_train - start, 2)

    if verbose:
        print("Extraction complete.")
        sys.stdout.flush()
    return comms, KM_obj, tot_time, train_time
Example #2
0
def serve():
    """Start the object-detection worker thread and serve gRPC requests.

    Blocks until the detector thread raises; the detector future's result()
    call is what keeps the process alive.
    """
    model_dir = '/media/mendel/detection-server/models'
    model_name = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    labels_name = 'coco_labels.txt'

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='.tflite model path',
                        default=os.path.join(model_dir, model_name))
    parser.add_argument('--labels', help='label file path',
                        default=os.path.join(model_dir, labels_name))
    parser.add_argument('--camera_idx', type=int, help='Index of which video source to use. ', default=1)
    parser.add_argument('--threshold', type=float, help='Detector threshold. ', default=0.7)
    parser.add_argument('--display', dest='display', action='store_true', help='Display object data. ')
    parser.set_defaults(display=False)
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(os.path.join(model_dir, args.model))
    interpreter.allocate_tensors()
    labels = common.load_labels(os.path.join(model_dir, args.labels))

    # Probe the camera once for its native resolution, then release it so
    # the detector thread can reopen the device.
    probe = cv2.VideoCapture(args.camera_idx)
    camera_res = (int(probe.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(probe.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    probe.release()

    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        # Object detection runs on one worker thread.
        detector_future = executor.submit(
            start_detector, args.camera_idx, interpreter, args.threshold,
            labels, camera_res, args.display)

        # The remaining workers back the gRPC server.
        server = grpc.server(executor)
        detection_server_pb2_grpc.add_DetectionServerServicer_to_server(
            DetectionServerServicer(camera_res), server)
        server.add_insecure_port('[::]:50051')
        server.start()

        # Wait on the detector thread; this returns only if it hits a
        # runtime error, after which the server is shut down.
        detector_future.result()

        server.stop(None)
Example #3
0
def main(argv):
    """Convert TSV input rows into serialized Example records.

    Reads at most --max_examples rows (all rows when that option is falsy)
    and writes them to --output_file. Returns 0.
    """
    args = argparser().parse_args(argv[1:])

    tokenizer = tokenization.FullTokenizer(
        vocab_file=args.vocab_file,
        do_lower_case=args.do_lower_case
    )
    label_list = load_labels(args.labels)
    label_map = {label: index for index, label in enumerate(label_list)}

    examples = []
    for features, target in tsv_generator(args.input_file, tokenizer, label_map, args):
        examples.append(Example(features, target))
        # Stop early once the optional cap is reached.
        if args.max_examples and len(examples) >= args.max_examples:
            break

    write_examples(examples, args.output_file)

    return 0
Example #4
0
def main(argv):
    """Train a model on TSV or TFRecord data under a MirroredStrategy.

    Workflow: parse args, compute the global batch size across replicas,
    build tokenizer and label maps, load training (and optional dev) data,
    create or restore the model inside the strategy scope, fit with optional
    checkpointing, report final dev accuracy, and optionally save the model.
    Returns 0 on success; raises ValueError/NotImplementedError on bad args.
    """
    print_versions()
    args = argument_parser('train').parse_args(argv[1:])

    # --train_data may name several comma-separated files.
    args.train_data = args.train_data.split(',')
    if args.checkpoint_steps is not None:
        os.makedirs(args.checkpoint_dir, exist_ok=True)

    strategy = MirroredStrategy()
    num_devices = strategy.num_replicas_in_sync
    # Batch datasets with global batch size (local * GPUs)
    global_batch_size = args.batch_size * num_devices

    tokenizer = get_tokenizer(args)

    label_list = load_labels(args.labels)
    label_map = { l: i for i, l in enumerate(label_list) }
    # NOTE(review): inv_label_map is unused in this function — presumably
    # kept for prediction decoding elsewhere; confirm before removing.
    inv_label_map = { v: k for k, v in label_map.items() }

    if args.task_name not in (["NER","RE"]):
        raise ValueError("Task not found: {}".format(args.task_name))

    # Input format is inferred from the first training file's extension.
    if args.train_data[0].endswith('.tsv'):
        if len(args.train_data) > 1:
            raise NotImplementedError('Multiple TSV inputs')

        train_data = TsvSequence(args.train_data[0], tokenizer, label_map,
                                global_batch_size, args)
        input_format = 'tsv'
    elif args.train_data[0].endswith('.tfrecord'):
        train_data = train_tfrecord_input(args.train_data, args.max_seq_length,
                                          global_batch_size)
        input_format = 'tfrecord'
    else:
        raise ValueError('--train_data must be .tsv or .tfrecord')

    # Dev data is optional; when absent, skip validation entirely.
    if args.dev_data is None:
        dev_x, dev_y = None, None
        validation_data = None
    else:
        dev_x, dev_y = load_dataset(args.dev_data, tokenizer,
                                    args.max_seq_length,
                                    label_map, args)
        validation_data = (dev_x, dev_y)

    print('Number of devices: {}'.format(num_devices), file=sys.stderr, 
          flush=True)
    if num_devices > 1 and input_format != 'tfrecord':
        warning('TFRecord input recommended for multi-device training')

    num_train_examples = num_examples(args.train_data)
    num_labels = len(label_list)
    print('num_train_examples: {}'.format(num_train_examples),
          file=sys.stderr, flush=True)

    # Model variables must be created/restored inside the strategy scope so
    # they are mirrored across replicas.
    with strategy.scope():
        model = restore_or_create_model(num_train_examples, num_labels, 
                                        global_batch_size, args)
    model.summary(print_fn=print)

    callbacks = []
    if args.checkpoint_steps is not None:
        # Save every N steps, and prune old checkpoints to bound disk use.
        callbacks.append(ModelCheckpoint(
            filepath=os.path.join(args.checkpoint_dir, CHECKPOINT_NAME),
            save_freq=args.checkpoint_steps
        ))
        callbacks.append(DeleteOldCheckpoints(
            args.checkpoint_dir, CHECKPOINT_NAME, args.max_checkpoints
        ))

    # A Sequence knows its own length; a tfrecord dataset needs explicit
    # steps_per_epoch.
    if input_format == 'tsv':
        other_args = {
            'workers': 10,    # TODO
        }
    else:
        assert input_format == 'tfrecord', 'internal error'
        steps_per_epoch = int(np.ceil(num_train_examples/global_batch_size))
        other_args = {
            'steps_per_epoch': steps_per_epoch
        }

    model.fit(
        train_data,
        epochs=args.num_train_epochs,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_batch_size=global_batch_size,
        **other_args
    )

    # Report accuracy on the dev set, if one was provided.
    if validation_data is not None:
        probs = model.predict(dev_x, batch_size=global_batch_size)
        preds = np.argmax(probs, axis=-1)
        correct, total = sum(g==p for g, p in zip(dev_y, preds)), len(dev_y)
        print('Final dev accuracy: {:.1%} ({}/{})'.format(
            correct/total, correct, total))

    if args.model_dir is not None:
        print('Saving model in {}'.format(args.model_dir))
        save_model_etc(model, tokenizer, label_list, args)
    
    return 0
Example #5
0
def main():
    """Parse CLI options and capture labeled detection images from a camera.

    Builds the TFLite interpreter and label map, determines the camera's
    native resolution, ensures one output directory exists per label being
    captured, then hands control to capture().
    """
    default_model_dir = '/media/mendel/detection-server/models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    default_capture_labels = ['person', 'dog', 'cat']
    default_capture_dir = '/media/mendel/detection-server/images'

    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='Label file path.',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument('--images',
                        help='Captured image path.',
                        default=default_capture_dir)
    parser.add_argument('--capture',
                        nargs='+',
                        type=str,
                        help='Labels to capture.',
                        default=default_capture_labels)
    parser.add_argument('--num_samples',
                        type=int,
                        help='Number of samples to capture.',
                        default=100)
    parser.add_argument('--camera_idx',
                        type=int,
                        help='Index of which video source to use.',
                        default=1)
    parser.add_argument('--threshold',
                        type=float,
                        help='Detector threshold.',
                        default=0.1)
    parser.add_argument('--frame_rate',
                        type=float,
                        help='Capture frame rate.',
                        default=2.0)
    parser.add_argument('--display',
                        dest='display',
                        action='store_true',
                        help='Display object data.')
    parser.set_defaults(display=False)
    parse_args = parser.parse_args()

    print('Loading {} with {} labels.'.format(parse_args.model,
                                              parse_args.labels))
    interpreter = common.make_interpreter(
        os.path.join(default_model_dir, parse_args.model))
    interpreter.allocate_tensors()
    labels = common.load_labels(
        os.path.join(default_model_dir, parse_args.labels))

    # Get native camera resolution.
    cap = cv2.VideoCapture(parse_args.camera_idx)
    camera_res = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(
        cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()

    # Create named directories for captured images. `capture_dir` avoids
    # shadowing the `dir` builtin; makedirs(exist_ok=True) replaces the
    # mkdir/FileExistsError dance and also tolerates a missing parent dir.
    for name in parse_args.capture:
        capture_dir = os.path.join(parse_args.images, name)
        if not os.path.isdir(capture_dir):
            os.makedirs(capture_dir, exist_ok=True)
            print('Created {}'.format(capture_dir))

    capture(parse_args, interpreter, labels, camera_res)