Code example #1
def main(argv=None):
    if FLAGS.tiny:
        model = yolov3_tiny_3l.yolo_v3_tiny
    elif FLAGS.dense:
        model = dense_yolov3_v1.dense_yolo_v3
    else:
        model = yolo_v3.yolo_v3

    classes = load_names(FLAGS.class_names)

    # placeholder for detector inputs
    inputs = tf.placeholder(tf.float32, [None, FLAGS.size, FLAGS.size, 3],
                            "inputs")

    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)
        load_ops = load_weights(tf.global_variables(scope='detector'),
                                FLAGS.weights_file)

    # Convert the raw detections into box coordinates; this node becomes the graph output
    boxes = detections_boxes(detections)

    with tf.Session() as sess:
        sess.run(load_ops)
        freeze_graph(sess, FLAGS.output_graph)
        writer = tf.summary.FileWriter("logs/", sess.graph)  # dump the graph for TensorBoard inspection
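All of the YOLO examples in this listing call a `load_names` helper to turn a class-names file into class labels. Its implementation is not shown in the snippets; a minimal sketch, assuming a plain-text file with one class name per line (e.g. coco.names) and an id-to-name dict as the return value, might look like this:

def load_names(file_name):
    # Hypothetical sketch: map class id -> class name, one name per line.
    names = {}
    with open(file_name) as f:
        for class_id, name in enumerate(f):
            names[class_id] = name.strip()
    return names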
Code example #2
def predict(a, path_weights):

	PATH = 'small_files/ASL'
	names = utils.load_names(PATH)

	# Build the fine-tuned model and load its weights
	# (replaces the earlier YAML-based model loading / cnn_weights.h5 restore).
	loaded_model = cnn_models.load_pre_tune_model(len(names), path_weights)

	# add a batch dimension to the input image: (H, W, 3) -> (1, H, W, 3)
	a = np.reshape(a, (1, a.shape[0], a.shape[1], 3))

	# note: `names` is re-assigned here to the class names returned by the test-set loader
	y, names, X = utils.load_data("data/224/RGB/test")

	# convert class vectors to binary class matrices
	y = keras_utils.to_categorical(np.ravel(y), len(names))

	sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
	loaded_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

	X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 3))

	# normalize data
	print("Normalizing data...")
	X = X.astype('float16')
	X /= 255

	# evaluate the model on the test set
	print("Evaluating the model...")
	scores = loaded_model.evaluate(X, y, verbose=1, batch_size=64)
	print("Accuracy: %.2f%%" % (scores[1] * 100))

	# predict the class of the input image and map it to a class name
	predicted_class = loaded_model.predict_classes(a)
	print(predicted_class)
	prediction = names[int(predicted_class)]
	print(prediction)

	return prediction
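`predict` expects a single H x W x 3 image array (it only adds a batch dimension before inference). A hypothetical call site, assuming 224 x 224 RGB input to match the `data/224/RGB/test` set used above (the image path and weights path are made up):

import cv2

img = cv2.imread('data/224/RGB/test/A/sample.jpg')   # hypothetical image path
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)           # OpenCV loads BGR; convert to RGB
img = cv2.resize(img, (224, 224))                    # match the 224x224 test data
print(predict(img, 'small_files/weights'))           # path_weights value is an assumption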
Code example #3
def main(argv=None):
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)

    config = tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=False,
        # inter_op_parallelism_threads=0,
        # intra_op_parallelism_threads=0,
        # device_count={"CPU": 6}
    )
    cap = cv2.VideoCapture(FLAGS.video_path)
    classes = utils.load_names(FLAGS.class_names)
    frozenGraph = utils.load_graph(FLAGS.frozen_model)
    boxes, inputs = utils.get_boxes_and_inputs_pb(frozenGraph)

    with tf.Session(graph=frozenGraph, config=config) as sess:
        while True:
            ret, frame = cap.read()
            if ret:
                t1 = time.time()
                frame1 = frame[:, :, ::-1]  # from BGR to RGB
                # frame1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                print('\'BGR2RGB\' time consumption:', time.time() - t1)
                img_resized = utils.resize_cv2(
                    frame1, (FLAGS.size, FLAGS.size),
                    keep_aspect_ratio=FLAGS.keep_aspect_ratio)
                img_resized = img_resized[np.newaxis, :]
                t0 = time.time()
                detected_boxes = sess.run(
                    boxes, feed_dict={inputs: img_resized})  # get the boxes whose confidence > 0.005
                filtered_boxes = utils.non_max_suppression(
                    detected_boxes,
                    confidence_threshold=FLAGS.conf_threshold,
                    iou_threshold=FLAGS.iou_threshold)[0]  # filter boxes by NMS
                print('\'detection\' time consumption:', time.time() - t0)
                utils.draw_boxes_cv2(filtered_boxes, frame, classes,
                                     (FLAGS.size, FLAGS.size),
                                     FLAGS.keep_aspect_ratio)
                print('\n\n\n')
                cv2.imshow('frame', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break

    cap.release()
    cv2.destroyAllWindows()
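A note on the `frame[:, :, ::-1]` trick above: reversing the last axis of a BGR array yields the same pixel values as `cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)`; the slice is a cheap reversed view, while `cvtColor` allocates a new contiguous array. A quick sanity check:

import numpy as np
import cv2

frame = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy BGR frame
assert np.array_equal(frame[:, :, ::-1], cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))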
Code example #4
def main(argv=None):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)

    config = tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=False,
        # inter_op_parallelism_threads=0,
        # intra_op_parallelism_threads=0,
        # device_count={"CPU": 6}
    )

    classes = utils.load_names(FLAGS.class_names)
    input_size = (FLAGS.size, FLAGS.size)
    img_pathes = [path for path in os.listdir(FLAGS.input_imgpath)
                  if path.endswith(('.jpg', '.png', '.bmp'))]
    num_imgs = len(img_pathes)
    batch_size = FLAGS.batch_size
    img_list = []

    img_batch_all = np.zeros((num_imgs, FLAGS.size, FLAGS.size, 3))
    for k in range(num_imgs):
        img_array = cv2.imread(os.path.join(FLAGS.input_imgpath, img_pathes[k]))
        img_list.append(img_array)
        img_batch_all[k] = utils.resize_cv2(img_array, input_size)[:, :, ::-1]

    frozenGraph = utils.load_graph(FLAGS.frozen_model)
    boxes, inputs = utils.get_boxes_and_inputs_pb(frozenGraph)

    with tf.Session(graph=frozenGraph, config=config) as sess:
        for i in range(0, num_imgs, batch_size):
            if i < num_imgs - batch_size:
                img_batch = img_batch_all[i:i + batch_size]
            else:
                img_batch = img_batch_all[i:]

            detected_boxes = sess.run(boxes, feed_dict={inputs: img_batch})
            filtered_boxes = utils.non_max_suppression(detected_boxes,
                                                       confidence_threshold=FLAGS.conf_threshold,
                                                       iou_threshold=FLAGS.iou_threshold)
            for n, bboxes in enumerate(filtered_boxes):
                img = img_list[i + n]
                img_name = img_pathes[i + n]
                utils.draw_boxes_cv2(bboxes, img, classes, input_size, keep_aspect_ratio=FLAGS.keep_aspect_ratio)
                # cv2.imshow('image_{}'.format(img_name), img)
                cv2.imwrite(os.path.join(FLAGS.output_imgpath, 'out_' + img_name), img)
                print('{} has been processed!'.format(img_name))
                print('#'*20)
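The `FLAGS` referenced throughout examples 1, 3, 4 and 5 are not defined in the snippets. With TF1's `tf.app.flags` they would typically be declared at module level; the sketch below reuses the flag names from the snippets, but every default value is an assumption:

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('frozen_model', 'frozen_yolov3.pb', 'path to the frozen graph')
tf.app.flags.DEFINE_string('class_names', 'coco.names', 'file with class names, one per line')
tf.app.flags.DEFINE_integer('size', 416, 'input size of the detector')
tf.app.flags.DEFINE_integer('batch_size', 4, 'number of images per inference batch')
tf.app.flags.DEFINE_float('conf_threshold', 0.5, 'confidence threshold for detections')
tf.app.flags.DEFINE_float('iou_threshold', 0.4, 'IoU threshold for non-max suppression')
tf.app.flags.DEFINE_float('gpu_memory_fraction', 0.4, 'per-process GPU memory fraction')
tf.app.flags.DEFINE_bool('keep_aspect_ratio', False, 'letterbox-resize instead of plain resize')
tf.app.flags.DEFINE_string('input_imgpath', 'input/', 'directory with input images')
tf.app.flags.DEFINE_string('output_imgpath', 'output/', 'directory for annotated outputs')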
Code example #5
def main(argv=None):
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)

    config = tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=False,
        # inter_op_parallelism_threads=0,
        # intra_op_parallelism_threads=0,
        # device_count={"CPU": 6}
    )

    img = Image.open(FLAGS.input_img)
    if FLAGS.keep_aspect_ratio:
        img_resized = utils.letter_box_image(img, FLAGS.size, FLAGS.size, 128)
        img_resized = img_resized.astype(np.float32)
    else:
        img_resized = img.resize((FLAGS.size, FLAGS.size), Image.BILINEAR)
        img_resized = np.asarray(img_resized, dtype=np.float32)

    classes = utils.load_names(FLAGS.class_names)
    frozenGraph = utils.load_graph(FLAGS.frozen_model)

    boxes, inputs = utils.get_boxes_and_inputs_pb(frozenGraph)

    with tf.Session(graph=frozenGraph, config=config) as sess:
        t0 = time.time()
        detected_boxes = sess.run(boxes, feed_dict={inputs: [img_resized]})

    print("Predictions found in {:.2f}s".format(time.time() - t0))

    filtered_boxes = utils.non_max_suppression(
        detected_boxes,
        confidence_threshold=FLAGS.conf_threshold,
        iou_threshold=FLAGS.iou_threshold)[0]

    utils.draw_boxes(filtered_boxes, img, classes, (FLAGS.size, FLAGS.size),
                     FLAGS.keep_aspect_ratio)

    img.save(FLAGS.output_img)
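`utils.letter_box_image(img, FLAGS.size, FLAGS.size, 128)` resizes the image while preserving its aspect ratio and pads the remaining canvas with the fill value 128. A minimal sketch of that idea (not the snippet's actual implementation):

import numpy as np
from PIL import Image

def letter_box_image(img, out_h, out_w, fill):
    # Scale the PIL image to fit inside (out_h, out_w), then center it on a padded canvas.
    ratio = min(out_w / img.width, out_h / img.height)
    new_w, new_h = int(img.width * ratio), int(img.height * ratio)
    resized = np.asarray(img.resize((new_w, new_h), Image.BILINEAR), dtype=np.uint8)
    canvas = np.full((out_h, out_w, 3), fill, dtype=np.uint8)
    dy, dx = (out_h - new_h) // 2, (out_w - new_w) // 2
    canvas[dy:dy + new_h, dx:dx + new_w, :] = resized
    return canvas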
Code example #6
def main():
    # load args, the trained checkpoint and the category-name mapping
    args = parse_args()
    gpu = args.gpu
    model = load_checkpoint(args.checkpoint)
    cat_to_name = load_names(args.category_names)
    img_path = args.filepath

    # compute the top-k class probabilities and their class indices
    probs, classes = predict(img_path, model, int(args.top_k), gpu)
    labels = [cat_to_name[str(index)] for index in classes]
    probability = probs
    print('File: ' + img_path)
    print(labels)
    print(probability)

    for label, prob in zip(labels, probability):
        print("Class {} returned with probability {}".format(label, prob))
Code example #7
def main():
    model = load_checkpoint(args.checkpoint)
    category_names = load_names(args.category_names)
    top_k = int(args.top_k)
    devices = args.devices
    if devices == 'gpu':
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cpu')

    img_path = args.img_input
    top_prob, top_classes = predict(img_path, model, top_k)
    max_index = top_classes[0]
    names = [category_names[str(index)] for index in top_classes]
    flower_name = category_names[str(max_index)]

    for name, prob in zip(names, top_prob):
        print("The probability for the category {} is {}".format(name, prob))
    print("The results indicate that this flower is most likely to be {}".format(
        flower_name))
    print("Prediction done!")
Code example #8
    logger.info("Loaded configuration file {}".format(args.cfg))
    logger.info("Running with config:\n{}".format(cfg))

    cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()
    cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()

    # absolute paths of model weights
    cfg.MODEL.weights_encoder = os.path.join(cfg.DIR,
                                             'encoder_' + cfg.TEST.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(cfg.DIR,
                                             'decoder_' + cfg.TEST.checkpoint)

    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"

    # generate testing image list
    if os.path.isdir(args.imgs):
        imgs = find_recursive(args.imgs)
    else:
        imgs = [args.imgs]
    assert len(imgs), "imgs should be a path to image (.jpg) or directory."
    cfg.list_test = [{'fpath_img': x} for x in imgs]

    if not os.path.isdir(cfg.TEST.result):
        os.makedirs(cfg.TEST.result)

    colors = load_colors(cfg.DATASET.colors_file)
    names = load_names(cfg.DATASET.names_file)

    main(cfg, args.gpu)
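`find_recursive(args.imgs)` is expected to walk a directory and collect image paths (the assertion above only mentions .jpg). A minimal sketch under that assumption:

import os

def find_recursive(root_dir, ext='.jpg'):
    # Hypothetical helper: collect every file with the given extension under root_dir.
    files = []
    for dirpath, _, filenames in os.walk(root_dir):
        files.extend(os.path.join(dirpath, f) for f in filenames if f.endswith(ext))
    return files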
Code example #9
def main():
    from matplotlib import pyplot as plt

    parser = OptionParser()
    parser.add_option('--model',
                      dest='model',
                      default='twitter.model',
                      help='model filename')
    parser.add_option('--names',
                      dest='names',
                      default='data/names.json',
                      help='names filename')
    options = parser.parse_args()[0]

    names = load_names(options.names)
    model = Doc2Vec.load(options.model)

    print(len(list(model.docvecs)))

    influencers = [str(uid) for uid in json.load(open('influencers.json'))]
    influencers = [
        uid for uid in influencers
        if uid in model.docvecs and uid not in ignore_ids
    ]
    print(len(influencers))
    vectors = [model.docvecs[uid] for uid in influencers]
    tsne = TSNE()
    positions = tsne.fit_transform(vectors)  # fit once and keep the 2-D embedding
    print(tsne.kl_divergence_)

    vectors_writer = csv.writer(open('vectors.csv', 'w'))
    positions_writer = csv.writer(open('positions.csv', 'w'))
    positions_writer.writerow(['name', 'id', 'x', 'y'])
    for uid, vector, position in zip(influencers, vectors, positions):
        name = names.get(int(uid), '')
        vectors_writer.writerow([name, uid] + list(vector))
        positions_writer.writerow([name, uid] + list(position))

    X = [[float(v) for v in vector] for vector in vectors]  # use the full embedding vectors
    print(X[0])
    y = pdist(X)
    Z = hierarchy.linkage(y)
    print(hierarchy.fcluster(Z, 1))
    plt.xlabel('sample index')
    plt.ylabel('distance')
    hierarchy.dendrogram(Z)
    plt.savefig('hoge.pdf')

    for k in range(2, 31):
        kmeans = KMeans(n_clusters=k, n_init=1000, max_iter=1000).fit(X)
        print('{}\t{}'.format(k, kmeans.inertia_))

    k = 7
    kmeans = KMeans(n_clusters=k, n_init=10000, max_iter=1000).fit(X)
    writer = csv.writer(open('cluster.csv', 'w'))
    writer.writerow(['name', 'id', 'label', 'x'] +
                    ['y{}'.format(i) for i in range(k)])
    for uid, l, row in zip(influencers, kmeans.labels_, positions):
        name = names.get(int(uid), '')
        x, y = row
        cols = ['' for _ in range(k)]
        cols[l] = y
        writer.writerow([name, uid] + [l, x] + cols)

    clusters = {uid: int(l) for uid, l in zip(influencers, kmeans.labels_)}
    print(clusters)
    json.dump(clusters, open('clusters.json', 'w'))
Code example #10
def main():
    parser = OptionParser()
    parser.add_option('-o', dest='filename', default='graph.gexf',
                      help='output filename')
    parser.add_option('--names', dest='names', default='data/names.json',
                      help='names filename')
    parser.add_option('--clusters', dest='clusters',
                      default='data/clusters.json',
                      help='clusters filename')
    parser.add_option('--start', dest='start', default='20110301',
                      help='start date (default=20110301)')
    parser.add_option('--stop', dest='stop', default='20130401',
                      help='stop date (default=20130401)')
    parser.add_option('--sampling', dest='sampling', type='float', default=1.0,
                      help='tweet sampling rate (default=1.0)')
    parser.add_option('--influencers', dest='n', type='int', default=None,
                      help='maximum number of influencers (default=inf)')
    parser.add_option('--mincount', dest='mincount', type='int', default=1,
                      help='minimum number of retweet (default=1)')
    parser.add_option('--mindegree', dest='mindegree', type='int', default=0,
                      help='minimum acceptable degree of nodes (default=0)')
    parser.add_option('--group', dest='group', default='month',
                      help='month or week (default=month)')
    options, args = parser.parse_args()

    names = load_names(options.names)
    clusters = load_clusters(options.clusters)
    start_date = ymd_to_datetime(options.start)
    stop_date = ymd_to_datetime(options.stop)

    tweets = load_tweet(*args)
    tweets = filter_by_datetime(tweets, start_date, stop_date)
    random.seed(0)
    tweets = sampling(tweets, options.sampling)

    if options.group == 'month':
        groups = [t.strftime('t%Y%m%d') for t
                  in months_between(start_date, stop_date)]
    else:
        groups = [t.strftime('t%Y%m%d') for t
                  in weeks_between(start_date, stop_date)]

    graph = nx.DiGraph()
    graph.graph['groups'] = groups
    for ruid, tss in group_by_influencer(tweets, options.n, options.mincount):
        for uid, ts in group_by_user(tss):
            ts = list(ts)
            labels = {d: False for d in groups}
            for t in ts:
                d = to_datetime(t['created_at']).strftime('t%Y%m%d')
                labels[groups[bisect_right(groups, d) - 1]] = True
            graph.add_edge(ruid, uid, weight=len(ts), **labels)

    for node in list(graph.nodes()):  # copy the node list, since nodes are removed while iterating
        if graph.degree(node, 'weight') < options.mindegree:
            graph.remove_node(node)
            continue
        graph.node[node]['label'] = names.get(node, '')
        graph.node[node]['cluster'] = clusters.get(node, -1)
        if graph.out_degree(node) == 0:
            cluster_count = {}
            for ruid in graph.predecessors(node):
                c = clusters.get(ruid, -1)
                if c not in cluster_count:
                    cluster_count[c] = 0
                cluster_count[c] += 1
            graph.node[node]['cluster'] = max(cluster_count.items(),
                                              key=lambda r: r[1])[0]
        for d in groups:
            graph.node[node][d] = False

    for d in groups:
        for u, v in graph.edges():
            if graph[u][v][d]:
                graph.node[u][d] = True
                graph.node[v][d] = True

    out_format = options.filename.split('.')[-1]
    if out_format == 'gexf':
        nx.write_gexf(graph, options.filename)
    else:
        obj = json_graph.node_link_data(graph)
        json.dump(obj, open(options.filename, 'w'))
    print('(|V|, |E|) = ({}, {})'.format(graph.number_of_nodes(),
                                         graph.number_of_edges()))
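The date options are plain 'YYYYMMDD' strings ('20110301', '20130401'), so `ymd_to_datetime` presumably just parses that format. A minimal sketch under that assumption:

from datetime import datetime

def ymd_to_datetime(s):
    # Hypothetical: parse a 'YYYYMMDD' string such as '20110301' into a datetime.
    return datetime.strptime(s, '%Y%m%d')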