Example #1
def service():
    if request.method == 'POST':
        file = request.files['file']
        file.save('image_test.jpg')

        # Car model classification
        brand, model, veh_type = predict('image_test.jpg')

        # Car plate detection; detect() is expected to write the cropped
        # plate image that recognize() reads below
        detect('image_test.jpg')

        # Car plate recognition
        text, prob = recognize('X000XX000.jpg')
        response = {
            "brand": brand,
            "model": model,
            "probability": prob,
            "veh_type": veh_type,
            "coord": "[(398,292),(573,360)]",
            "id": "0001",
            "plate": text
        }
        response = json.dumps(response, ensure_ascii=False)

        return Response(response=response,
                        status=200,
                        mimetype="application/json")
    return render_template("service.html")
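A minimal client sketch for exercising this endpoint, assuming the service is registered at /service on localhost:5000 (both hypothetical):

import requests

# hypothetical host and route; adjust to wherever service() is mounted
with open('car.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/service', files={'file': f})
print(resp.json())  # {"brand": ..., "model": ..., "plate": ..., ...}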
Example #2
def detect(video_path, candidate_save_path, mq: Queue, cfg):
    """
    run object detection algorithm
    :param video_path: video stream save path
    :param candidate_save_path: the region candidates
    :param mq: process communication pipe in which alg will read the newest stream index
    :param cfg: video configuration
    :return:
    """
    try:
        detection.detect(video_path, candidate_save_path, mq, cfg)
        return True
    except Exception as e:
        traceback.print_exc()
        logger.error(e)
        return False
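A sketch of how this wrapper might be driven from a separate process, with the stream reader publishing the newest index into the queue; the Process/Queue wiring and the paths are assumptions, and cfg stands in for the caller's video configuration:

from multiprocessing import Process, Queue

cfg = {}  # placeholder for the video configuration object
mq = Queue()
worker = Process(target=detect,
                 args=('/data/stream.ts', '/data/candidates', mq, cfg))
worker.start()
mq.put(0)  # the stream reader pushes the newest stream index here
worker.join()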
Example #3
def on_data():
    global cancel_alarm
    global alarm_countdown
    if not alarm_countdown:
        raw_readings = request.data.decode()
        readings_arr = raw_readings.split('\n')[:-1]

        for i, reading in enumerate(readings_arr):
            readings_arr[i] = reading.split(' ')
        float_readings = np.array(readings_arr).astype(float)  # np.float was removed in NumPy 1.24

        N = 40
        Ts = 1.0 / 40
        (xf, yf_plt, inte) = fft.fft_transform(float_readings, N=N, Ts=Ts)

        plt.xlabel('Frequency')
        plt.ylabel('Power')
        plt.ylim(0, 1)
        plt.plot(xf, yf_plt)
        plt.pause(0.2)
        plt.clf()

        cache.update('acc', float_readings)

        seizure = detection.detect(yf_plt, threshold=4, amp_thresh=0.6)
        print(seizure)
        if seizure:
            threading.Timer(check_seconds, alarm).start()
            alarm_countdown = True
            return make_response('not ok')
        else:
            return make_response('ok')
    else:
        return make_response('countdown')
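fft.fft_transform is not shown; a minimal sketch of what it plausibly computes (a one-sided spectrum of the accelerometer magnitude) could look like this, where the magnitude reduction and the normalisation are assumptions:

import numpy as np

def fft_transform(readings, N=40, Ts=1.0 / 40):
    """Return (frequency bins, normalised spectrum, total power)."""
    # collapse the 3-axis accelerometer rows to a single magnitude signal
    magnitude = np.linalg.norm(readings, axis=1)[:N]
    yf = np.abs(np.fft.rfft(magnitude)) / N
    xf = np.fft.rfftfreq(N, d=Ts)
    return xf, yf, yf.sum()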
Example #4
def run_single_test(data_dir, output_dir):
    from detection import train_detector, detect
    from keras import backend as K
    from keras.models import load_model
    from os import environ
    from os.path import abspath, dirname, join

    train_dir = join(data_dir, 'train')
    test_dir = join(data_dir, 'test')

    train_gt = read_csv(join(train_dir, 'gt.csv'))
    train_img_dir = join(train_dir, 'images')
    
    print('training')
    train_detector(train_gt, train_img_dir, fast_train=True)

    code_dir = dirname(abspath(__file__))
    model = load_model(join(code_dir, 'model_e300.hdf5'))
    test_img_dir = join(test_dir, 'images')
    print('detecting')
    detected_points = detect(model, test_img_dir)
    save_csv(detected_points, join(output_dir, 'output.csv'))

    if environ.get('KERAS_BACKEND') == 'tensorflow':
        K.clear_session()
Example #5
def nutritionExtract(request):
    if request.method == 'POST':
        new_file = UploadFile(file=request.FILES['image'])
        new_file.save()
        name = new_file.file.name
        response = detect(name, False)
        return JsonResponse(response)
Example #6
def pretraining():
    if (request.method == 'POST'):
        import detection
        response = request.get_json()
        #extract filename
        filename = response.get('value')
        #current os username
        user_name = getpass.getuser()
        print('accessing the user: ' + user_name)
        image_url = '/Users/' + user_name + '/Desktop/' + filename
        print('the file url is: ' + image_url)
        predictions = detection.detect(image_url)

        for x in predictions:
            print(x[2] * 100)
            xaxis.append(x[2] * 100)

        for y in predictions:
            print(y[1])
            yaxis.append(y[1])

        #kill the script to save memory after processing
        detection.kill()
        #acknowledge completion; the results stay in the global xaxis/yaxis lists
        return 'done'
Example #7
def detect_and_show(filename):
    with open(os.path.join(_DATA_DIR, filename)) as in_file:
        values = in_file.read().split()
        input_signal = [float(x) for x in values]
    result = detection.detect(input_signal, 2000)
    plot_signal_with_peaks(input_signal, result)
    pp.show()
Example #8
def show_peaks(filename, show_file_peaks=False, end_sec=None):
    signal = sig.read(filename, end_sec=end_sec)
    peaks = detection.detect(signal.values, signal.sampling_rate)
    plot_signal_with_peaks(signal.values, peaks, "r")
    if signal.peaks and show_file_peaks:
        plot_vlines(signal.peaks, "g")
    pp.show()
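Examples #7, #8 and #20 all rely on plot_signal_with_peaks and plot_vlines, neither of which is shown. A minimal matplotlib sketch under the obvious assumptions (pp is matplotlib.pyplot, peaks are sample indices):

import matplotlib.pyplot as pp

def plot_vlines(positions, color):
    for x in positions:
        pp.axvline(x, color=color, linestyle='--')

def plot_signal_with_peaks(signal, peaks, color="r"):
    pp.plot(signal)
    plot_vlines(peaks, color)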
Example #9
def process_inputframes(frames_path, nth=1, duration=10):
    frame_files = sorted(glob.glob(frames_path + '/*'))
    num_frames = len(frame_files)
    print(
        'Detecting and recognizing text from {} frames for every {}th frame: {}'
        .format(num_frames, nth, str(datetime.now())))

    entries = []
    for f_index, filename in tqdm(enumerate(frame_files), total=num_frames):
        if (f_index % nth == 0):
            boxes, scores = detection.detect(filename)

            if scores.shape[0] != 0:
                texts = recognition.recognize(filename, boxes)
                for index, box in enumerate(boxes):
                    entry = {}
                    entry['f_index'] = f_index
                    entry['time_stamp'] = '{:2.2f}'.format(
                        f_index / num_frames * duration)
                    entry['text'] = texts[index]
                    entry['bbox'] = [
                        box[0], box[1], box[2] - box[0], box[3] - box[1]
                    ]
                    entry['score'] = scores[index].item()
                    entries.append(entry)
    return entries
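A usage sketch that persists the result; the frame directory and output name are hypothetical, and default=float guards against stray NumPy scalars in the entries:

import json

entries = process_inputframes('frames/clip01', nth=5, duration=10)
with open('ocr_entries.json', 'w') as out:
    json.dump(entries, out, indent=2, default=float)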
Example #10
def rela_coords():
    cap = cv2.VideoCapture(1)
    ret, image = cap.read()
    #cv2.imshow("Image", image)
    # cv2.waitKey(0)

    if not ret:
        raise Exception('Camera initialization failed.')

    robot, corners, sodas, milks, res_image = detection.detect(image)
    #cv2.imshow("detection", res_image)
    #cv2.waitKey(0)
    # if there are any problems return dummy map, so that robot will move
    if len(robot[0]) != 2 or len(robot[1]) != 2 or len(corners) != 2:
        dummy_corners = [[50, 50], [-50, -50]]
        dummy_sodas = []
        dummy_milks = [[0, 10]]
        cap.release()  # release the camera before the early return
        return [len(dummy_milks), len(dummy_sodas)
                ], np.array(dummy_corners + dummy_milks + dummy_sodas,
                            dtype='int32')

    corners, sodas, milks = align_to_robot(robot, corners, sodas, milks)
    cv2.destroyAllWindows()
    cap.release()

    #print(np.array([*corners, *milks, *sodas], dtype='int32'))
    return [len(milks), len(sodas)], np.array([*corners, *milks, *sodas],
                                              dtype='int32')
Example #11
def detect_wrap(img_dir,
                clf_path=os.path.dirname(os.path.realpath(__file__)),
                config_path=os.path.dirname(
                    os.path.realpath(__file__)) + '/config.yaml',
                thresh=0.5):

    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)  # yaml.load without a Loader is deprecated

    bbs = detect(data_path=img_dir,
                 write_path=clf_path,
                 target_width=config['target_width'],
                 target_height=config['target_height'],
                 x_stride=config['target_stride_x'],
                 y_stride=config['target_stride_y'],
                 thresh=thresh,
                 n_images=-1,
                 flag_rgb=config['flag_rgb'],
                 flag_usemask=False,
                 thresh_mask=config['thresh_mask'],
                 nms_thresh=config['nms_threshhold'],
                 flag_det_rot_aug=config['flag_det_rot_aug'],
                 )

    return bbs
Example #12
def language_translate_v2():

	source = request.args.get('source', '')
	target = request.args.get('target', '')
	text = request.args.get('q', '')

	if len(source) == 0 or len(target) == 0:
		data = detect(text)
	else:
		data = translate(source, target, text)
	
	return jsonify(data = data)
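Exercising the route with requests; the host and the mount point are assumptions:

import requests

r = requests.get('http://localhost:5000/language/translate/v2',
                 params={'q': 'bonjour', 'source': 'fr', 'target': 'en'})
print(r.json()['data'])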
Example #13
def capture():
    cap = cv2.VideoCapture(3)

    ret, frame = cap.read()
    print(ret)
    cap.release()

    img = crop(frame)
    color2 = detect(img)
    #cv2.imshow("1",frame)
    #cv2.imshow("2",img)
    #cv2.waitKey(0)
    return color2
Example #14
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default=['config.ini'])
    parser.add_argument('-t',
                        '--data_type',
                        nargs='+',
                        default=['train', 'val'])
    parser.add_argument('-d', '--delete', action='store_true')
    parser.add_argument('--train', type=str2bool, default='f')
    parser.add_argument('--steps', type=int, default=200000)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--gradient_norm', type=float, default=5.0)
    parser.add_argument('--optimizer_name', type=str, default='adam')
    parser.add_argument('--save_secs', type=int, default=1000)
    parser.add_argument('--summary_secs', type=int, default=100)
    parser.add_argument('--logging_level', default='INFO')
    parser.add_argument('--task', type=int, default=0)
    parser.add_argument('--probability_threshold', type=float, default=0.3)
    parser.add_argument('--iou_threshold', type=float, default=0.4)

    args = parser.parse_args()
    if args.logging_level:
        tf.logging.set_verbosity(args.logging_level)
    config = configparser.ConfigParser()
    load_config(config, args.config)

    model_name = config.get('config', 'model')

    base_dir = os.path.expanduser(config.get('config', 'basedir'))
    anchor_info = pd.read_csv(
        os.path.join(base_dir, config.get('cache', 'anchor'))).values

    if args.train:
        tf.logging.info('Training')
        train(config, args, anchor_info, model_name)
    else:
        tf.logging.info('Object detecting')
        detect(config, args, anchor_info, model_name)
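str2bool is referenced by the --train argument but not defined in the snippet; a common argparse-friendly sketch:

import argparse

def str2bool(v):
    # map the usual truthy/falsy spellings onto real booleans
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')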
Example #15
def detectMainFace(imageName, isPath):
    model = "src/deploy.prototxt.txt"  # model-definition
    weights = "src/res10_300x300_ssd_iter_140000.caffemodel"  # pre-trained weights
    image = imageName  # image name reqd. images are loaded as 3D matrix - (h x w x c)

    # send for face detection
    colorImage, grayImage, mainFaceBox = detection.detect(
        model, weights, image, isPath)

    # crop the misaligned face from the whole image
    mainFaceGray = grayImage[mainFaceBox[2]:mainFaceBox[3],
                             mainFaceBox[0]:mainFaceBox[1]]
    mainFaceColor = colorImage[mainFaceBox[2]:mainFaceBox[3],
                               mainFaceBox[0]:mainFaceBox[1]]

    return colorImage, mainFaceColor, mainFaceGray, mainFaceBox
Example #16
def detect(primaryName, primaryNumber, secondaryNumber, data):
    command = data[0]
    tmax = data[1]
    tmin = data[2]
    stringCmd = 'Command: ' + command
    stringHMax = 'HMax: ' + tmax
    stringHMin = 'HMin: ' + tmin
    print(stringCmd)
    print(stringHMax)
    print(stringHMin)

    if (tmax == '' or tmin == ''):
        tmax = '30'
        tmin = '29'

    return detection.detect(primaryName, primaryNumber, secondaryNumber,
                            int(tmax), int(tmin))
Example #17
def run_single_test(data_dir, output_dir):
    from detection import train_detector, detect
    from keras import backend as K
    from keras.models import load_model
    from os.path import abspath, dirname, join
    train_dir = join(data_dir, 'train')
    test_dir = join(data_dir, 'test')

    train_gt = read_csv(join(train_dir, 'gt.csv'))
    train_img_dir = join(train_dir, 'images')

    model = train_detector(train_gt, train_img_dir, fast_train=True)

    code_dir = dirname(abspath(__file__))
    model = load_model(join(code_dir, 'facepoints_model.hdf5'))
    test_img_dir = join(test_dir, 'images')
    detected_points = detect(model, test_img_dir)
    save_csv(detected_points, join(output_dir, 'output.csv'))

    K.clear_session()
Example #18
def detection():
    # initialize the data dictionary that will be returned from the
    # view
    data = {"success": False}

    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.files.get("image"):
            # read the image in PIL format
            image = flask.request.files["image"].read()
            npimg = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated for binary data
            img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
            img = detect(img)
            # cv2.imshow("image", img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            retval, buffer = cv2.imencode('.jpg', img)
            img_base64 = base64.b64encode(buffer).decode('utf-8')
            data['success'] = True
            data['image'] = str(img_base64)
    return jsonify(data)
Example #19
def detect_singlescale_wrap(config, thresh, dataset):

    if dataset == 'train':
        sub_path = config['detect_train_set']
    elif dataset == 'valid':
        sub_path = config['detect_valid_set']
    elif dataset == 'test':
        sub_path = config['detect_test_set']
    else:
        raise ValueError('unknown dataset: {}'.format(dataset))

    return detect(data_path=os.path.join(config['data_path'], sub_path),
                  write_path=config['write_path'],
                  thresh=thresh,
                  target_width=config['target_width'],
                  target_height=config['target_height'],
                  x_stride=config['target_stride_x'],
                  y_stride=config['target_stride_y'],
                  n_images=-1,
                  flag_rgb=config['flag_rgb'],
                  flag_usemask=config['flag_usemask'],
                  thresh_mask=config['thresh_mask'],
                  nms_thresh=config['nms_threshhold'],
                  flag_det_rot_aug=config['flag_det_rot_aug'])
Example #20
def read_signal_with_peaks(filename):
    second_count = 0  # 0 disables the size limit and reads the whole file
    sampling_rate = 2000
    signal_size = int(second_count * sampling_rate)
    with open(os.path.join(_DATA_DIR, filename)) as in_file:
        lines = in_file.readlines()
    signal = []
    peaks = []
    for index, line in enumerate(lines):
        if signal_size and index >= signal_size:
            break
        if not line or line == '\n':
            continue
        value, is_peak = line.split()
        signal.append(float(value))
        is_peak = bool(int(is_peak))
        if is_peak:
            peaks.append(index)
    my_peaks = detection.detect(signal, sampling_rate)

    pp.plot(signal)
    plot_vlines(peaks, "g")
    plot_vlines(my_peaks, "r")
    pp.show()
Example #21
def detection():
    # initialize the data dictionary that will be returned from the
    # view
    data = {"success": False}

    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.files.get("image"):
            # read the image in PIL format
            image = flask.request.files["image"].read()
            npimg = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated for binary data
            img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
            img = detect(img)
            cv2.imshow("image", img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
            img = Image.fromarray(img.astype("uint8"))
            rawBytes = io.BytesIO()
            img.save(rawBytes, "JPEG")
            rawBytes.seek(0)
            img_base64 = base64.b64encode(rawBytes.read()).decode('utf-8')
            data['success'] = True
            data['image'] = img_base64
    return jsonify(data)
Example #22
def infer_save_frames(input_frames_path, out_frames_path, every_nth=1):
    frame_files = sorted(glob.glob(input_frames_path + '/*'))

    num_frames = len(frame_files)
    detect_time = 0
    recognize_time = 0
    print('Detecting and recognizing text from {} frames: {}'.format(
        num_frames, str(datetime.now())))
    wordBB = None
    score = None
    text = None

    for index, filename in tqdm(enumerate(frame_files), total=num_frames):
        out_name = out_frames_path + '/out_{0:04d}.png'.format(index)
        if (index % every_nth == 0):
            wordBB, score = detection.detect(filename)

            if score.shape[0] == 0:
                wordBB = None
                score = None
            else:
                text = recognition.recognize(filename, wordBB)

        utilities.save(filename, wordBB, text, out_name)
Example #23
    # VideoStream or VideoCapture object
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    # resize the frame (so we can process it faster) and grab the
    # frame dimensions
    frame = imutils.resize(frame, width=1500) #resize the frame
    #frame=cv2.resize(frame,(1280,720))
    (H, W) = frame.shape[:2] # get the frame dimensions 
    image=frame.copy() # take a copy from the frame
    #frame=cv2.GaussianBlur(frame,(3,3),1)
    #frame=cv2.blur(frame,(3,3))

    # detect solar panels on the region of interest
    bboxes, masks, scores = detect(frame)
    conf_box_count = 0
    cur_frame_axies = []
    cur_frame_label = []
    for idx, box in enumerate(bboxes):
        x = box[0]
        y = box[1]
        w = box[2] - box[0]  # local box size; don't shadow the frame's (H, W)
        h = box[3] - box[1]
        aspect_ratio = h / w
        area = h * w
        if scores[idx] >= 0.995 and area > 6000 and 0.7 <= aspect_ratio <= 2.2:
            conf_box_count += 1
    label_list=list(ref_frame_label_deq)
    axies_list=list(ref_frame_axies_deq)
    ref_frame_label=[item for sublist in label_list for item in sublist]
Example #24
import cv2
import argparse
from preprocess import img_preprocess
from detection import detect
from recognition import recognition_card

parser = argparse.ArgumentParser(description='Image Path')
parser.add_argument('path', type=str)
args = parser.parse_args()

img_file = args.path
img_rgb = cv2.imread(img_file)  # read the image

if __name__ == '__main__':
    img_card = img_preprocess(img_rgb)
    detect_imgs = detect(img_card)
    lens = len(detect_imgs)
    t = '0123456789-'
    for num, detect_img in enumerate(detect_imgs):
        result = recognition_card(detect_img)
        # post-preprocess (reject some impossible character)
        if lens == 3:
            result_3 = result
            if num == 0:
                result = str(result).replace(':', '')

        if lens == 4:
            result_4 = result
            if num == 0 or num == 1:
                for i, item in enumerate(str(result_4)):
                    if item not in t:
Example #25
    # finding the corners of polygon in order
    box = [a[0][0], a[1][0], a[2][0], a[3][0]]
    box = sorted(box, key=lambda x: x[1])
    if box[0][0] > box[1][0]:
        box[0], box[1] = box[1], box[0]
    if box[2][0] > box[3][0]:
        box[2], box[3] = box[3], box[2]
    i = 1
    box = np.array(box)
    box += np.array([[i, i], [-i, i], [i, -i], [-i, -i]])

    # Cropping
    pts1 = np.float32(box)
    pts2 = np.float32([[0, 0], [480, 0], [0, 480], [480, 480]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(img, M, (480, 480))

    return dst


if __name__ == "__main__":
    c = cv2.imread("shot1.jpg")
    z = crop(c)
    color, intensity = detect(z)
    print(color)
    print(intensity)
    cv2.imshow("image", z)
    cv2.imshow("img", c)
    cv2.waitKey(0)
Example #26
                    if (truths[bbrect_i]==0) and \
                    isintersect(sorted_bb[bbrect_i,:4],gtrect):
                        truths[bbrect_i]=1
                        break
            all_measures = hstack((all_measures,measures))
            all_truths = hstack((all_truths,truths))
    if (all_measures.shape[0]==0):
        return 0.0
    
    #Call with plot=True to plot Precision-Recall curve
    return compute_auc(all_truths,all_measures,gt_count,plot=False)


if len(argv) != 3:
    stdout.write('Usage: %s train_dir test_dir\n' % argv[0])
    exit(1)

train_dir = argv[1]
test_dir = argv[2]

train_imgs, train_gt = load_gt(train_dir)
test_imgs, test_gt = load_gt(test_dir)

model = train_detector(train_imgs, train_gt)
bboxes = []
for img in test_imgs:
    bboxes.append(array(detect(model, img)))

stdout.write('%.2f\n' % compute_metrics(bboxes, test_gt))
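isintersect is assumed to test whether a detection rectangle overlaps a ground-truth rectangle; a minimal axis-aligned sketch, where the [x1, y1, x2, y2] coordinate order is an assumption:

def isintersect(rect_a, rect_b):
    ax1, ay1, ax2, ay2 = rect_a
    bx1, by1, bx2, by2 = rect_b
    # rectangles overlap unless one lies entirely to one side of the other
    return not (ax2 < bx1 or bx2 < ax1 or ay2 < by1 or by2 < ay1)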

Example #27
SLEEPNUM = 0.4


# Pass in x, y coordinates for mouse to click
# only works in windows
def click(x, y):
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)


while currentActions < desiredScore:
    # take screenshot and save it in desired location
    secreenshot(emulatorLocation['left'],
                emulatorLocation['top'],
                emulatorLocation['width'],
                emulatorLocation['height'],
                fileName=SCREENSHOT_FILENAME)

    # determine where center of ball is in screenshot
    centerX, centerY = detect(TARGET_FILENAME, SCREENSHOT_FILENAME)

    # if ball is near bottom half of screen, its okay to click
    # (prevents ball from traveling too far vertically)
    if centerY > (emulatorLocation['height'] - emulatorLocation['top']) / 2:
        click(centerX, centerY + PIXBUFFER)

        # sleep to prevent spamming
        time.sleep(SLEEPNUM)
        currentActions += 1
        print(currentActions)
Example #28
import cv2
from detection import detect
faceCas = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(1)  # try -1, 0 , 1 in the case if you can't see video
while True:
    ret, img = cap.read()
    img = detect(img, faceCas)
    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
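The detect(img, faceCas) helper is imported from detection but not shown; a plausible minimal implementation on top of OpenCV's cascade API, where the scale and neighbour parameters are assumptions:

import cv2

def detect(img, cascade):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        # draw a blue box around every detected face
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return img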
Example #29
print('Show the first and last images of training dataset')
fig, ax = plt.subplots(1, 2)
ax[0].axis('off')
ax[0].set_title('Face')
ax[0].imshow(trainData[1][0], cmap='gray')
ax[1].axis('off')
ax[1].set_title('Non face')
ax[1].imshow(trainData[-1][0], cmap='gray')
plt.show()

# Part 2: Implement selectBest function in adaboost.py and test the following code.
# Part 3: Try different values of the parameter T of the Adaboost algorithm
# and look for better results. Please test at least the values 1-10.
# print('Start training your classifier')
clf = adaboost.Adaboost(T=1)
clf.train(trainData)

print('\nEvaluate your classifier with training dataset')
utils.evaluate(clf, trainData)

print('\nEvaluate your classifier with test dataset')
utils.evaluate(clf, testData)

# Part 4: Implement detect function in detection.py and test the following code.
print('\nDetect faces at the assigned location using your classifier')
detection.detect('data/detect/detectData.txt', clf)

# Part 5: Test classifier on your own images
print('\nDetect faces on your own images')
detection.detect('yourOwnImages', clf)
Example #30
today = date.today()
date_today = today.strftime("%d%m%Y")

print("List of Valid Operations:\n")
print("1. Create a database")
print("2. Add a student")
print("3. Train the model")
print("4. Scan face for attendance")
print("5. Check Attendance")
print("6. View Database")
to_do = int(input("\n Enter Valid number: "))

if to_do == 1:
    initialize.init_()
elif to_do == 2:
    detection.detect()
elif to_do == 3:
    train.train()
elif to_do == 4:
    recognition.recognise()
elif to_do == 5:
    conn = sqlite3.connect('database.db')
    cur = conn.cursor()
    roll_no = input("\nEnter Roll no. :")
    cur.execute("SELECT date FROM attendance WHERE _{s_roll_no}=1".format(
        s_roll_no=roll_no))
    # print(cur.fetchall())
    presents = len(cur.fetchall())
    cur.execute("SELECT * FROM attendance")
    total = len(cur.fetchall())
    print(str(presents) + '/' + str(total))
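The attendance query above interpolates raw input into a column name, which SQL parameters cannot replace; a hedged sketch that at least validates the identifier before formatting it (the schema layout is an assumption):

def fetch_attendance_dates(cur, roll_no):
    # column names cannot be bound as SQL parameters, so validate the
    # raw input before interpolating it into the identifier
    if not str(roll_no).isdigit():
        raise ValueError('roll number must be numeric')
    cur.execute('SELECT date FROM attendance WHERE _{}=1'.format(roll_no))
    return cur.fetchall()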
Example #31
def object_detection(event):
    frame = event.cv2img
    cv2.imwrite("image.jpg", frame)
    detection.detect("image.jpg")
Example #32
            pil_draw.ellipse(
                (pt2[0] - radius, pt2[1] - radius, pt2[0] + radius, pt2[1] + radius), fill=gt_color)
        pil_img.save(res_dir + '/' + impaths[i])

#if (len(argv) != 2) and (len(argv) != 4):
#    stdout.write('Usage: %s train_dir test_dir [-v results_dir]\n' % argv[0])
#    exit(1)
start_time = time.time()
train_dir = argv[1]
#test_dir = argv[2]
visualisation_needed = (len(argv) > 2) and (argv[2] == '-v')
if visualisation_needed:
    res_dir = argv[3]
    train_imgs, train_gt, test_paths = load_data(train_dir, True)
else:
    train_imgs, train_gt = load_data(train_dir)

# sklearn.cross_validation was removed in 0.20; train_test_split now lives
# in sklearn.model_selection
X_train, X_test, y_train, y_test = train_test_split(
    train_imgs, train_gt, test_size=0.1, train_size=0.5, random_state=2)

model = train_detector(X_train, y_train)
del X_train,y_train
detection_results = np.array(detect(model, X_test))
print("Result: %.4f" % compute_metrics(X_test, detection_results, y_test))
if visualisation_needed:
    visualise(X_test, detection_results, y_test, res_dir, test_paths)
end_time = time.time()
print("Running time:", round(end_time - start_time, 2),
      's (' + str(round((end_time - start_time) / 60, 2)) + " minutes)")
Example #33
    return img_shapes


def compute_metric(detected, gt, img_shapes):
    res = 0.0
    for filename, coords in detected.items():
        n_rows, n_cols = img_shapes[filename]
        diff = (coords - gt[filename])
        diff[::2] /= n_cols
        diff[1::2] /= n_rows
        diff *= 100
        res += (diff**2).mean()
    return res / len(detected.keys())


train_gt = read_csv(join(train_dir, 'gt.csv'))
train_img_dir = join(train_dir, 'images')

model = train_detector(train_gt, train_img_dir, fast_train=False)
model.save('facepoints_model.hdf5')

#model = train_detector(train_gt, train_img_dir, fast_train=True)

model = load_model('facepoints_model.hdf5')
test_img_dir = join(test_dir, 'images')
detected_points = detect(model, test_img_dir)
test_gt = read_csv(join(test_dir, 'gt.csv'))
img_shapes = read_img_shapes(test_dir)
error = compute_metric(detected_points, test_gt, img_shapes)
print('Error: ', error)
Example #34
        pil_img.save(res_dir + '/' + impaths[i])

if (len(argv) != 3) and (len(argv) != 5):
    stdout.write('Usage: %s train_dir test_dir [-v results_dir]\n' % argv[0])
    exit(1)
start_time = time.time()
train_dir = argv[1]
test_dir = argv[2]
visualisation_needed = (len(argv) > 3) and (argv[3] == '-v')
if visualisation_needed:
    res_dir = argv[4]

train_imgs = load_imgs(train_dir)
train_gt = load_gt(train_dir)
model = train_detector(train_imgs, train_gt)
del train_imgs, train_gt
test_imgs = list(load_imgs(test_dir))
test_gt = list(load_gt(test_dir))
if visualisation_needed:
    test_paths = list(load_paths(test_dir))
detection_results = np.array(detect(model, test_imgs, test_gt))
print("Result: %.4f" % compute_metrics(test_imgs, detection_results, test_gt))
if visualisation_needed:
    visualise(test_imgs, detection_results, test_gt, res_dir, test_paths)
end_time = time.time()
print("Running time:", round(end_time - start_time, 2),
      's (' + str(round((end_time - start_time) / 60, 2)) + " minutes)")