Example #1
from flask import redirect, render_template, request, url_for


def pre():
    # Run the model (predict() is defined elsewhere in the project) and build
    # an x-axis index, 0..n-1, for each predicted MQ-135 value.
    mq135values = predict()
    mq135lables = list(range(len(mq135values)))  # spelling kept to match the template variable
    if request.method == "POST":
        # url_for() expects an endpoint name, not a URL path.
        return redirect(url_for('Prediction'))
    return render_template('Ai_graph.html',
                           pactive="active",
                           mq135values=mq135values,
                           mq135lables=mq135lables)
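For context, a view like this must be registered on a Flask app with POST enabled; a minimal sketch, assuming an app object and a separate Prediction endpoint exist elsewhere (the URL rule here is hypothetical):

from flask import Flask

app = Flask(__name__)

# POST must be allowed for the request.method check above to ever be true.
app.add_url_rule('/ai-graph', view_func=pre, methods=['GET', 'POST'])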
Example #2
import json
import random

import ai  # project-local module; assumed to provide predict()


async def generate(ctx, filename: str, length: int):
    # Model files are stored as JSON pairs: [model, raw_training_text].
    try:
        with open("models/" + filename + ".model", "r") as f:
            x = json.loads(f.read())
            model = x[0]
            raw = x[1]
    except IOError:
        await ctx.send("Error - Specified model does not exist.")
        return

    # The memory size is encoded after the dash in the model's file name.
    memsize = int(filename.split("-")[1])

    # Seed the generator with a random slice of the original training text.
    y = random.randint(0, len(raw) - memsize - 1)
    out = ai.predict(raw[y:y + memsize], length, model)

    # Discord rejects messages longer than 2000 characters.
    if len(out) < 2000:
        await ctx.send(out)
    else:
        await ctx.send(
            "Error - texts larger than 2000 chars will be supported soon.")
Example #3
import argparse
from os import path

import ai  # project-local module; assumed to provide train(), predict() and mine()

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcmd')

# command: train; for training the detector on labelled samples
parser_train = subparsers.add_parser(
    'train',
    help='trains the system to detect traffic lights in an image given training samples')

# command: predict; for predicting traffic lights given an input image
parser_predict = subparsers.add_parser(
    'predict', help='attempts to find all traffic lights in an image')
parser_predict.add_argument('--path', help='path to image', required=True)

# command: mine; for mining positive and negative examples from a given input image
parser_mine = subparsers.add_parser(
    'mine', help='tool for hard mining positive and negative training data')
parser_mine.add_argument('--path', help='path to image', required=True)
parser_mine.add_argument(
    '--use-predicted',
    help='whether or not to use the boxes that were predicted as positive samples',
    action='store_true')

args = parser.parse_args()

if args.subcmd == 'train':
    ai.train()
elif args.subcmd == 'predict':
    ai.predict(path.expanduser(args.path))
elif args.subcmd == 'mine':
    ai.mine(path.expanduser(args.path), args.use_predicted)
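Typical invocations, assuming the script is saved as main.py (the file name and image path are hypothetical):

python main.py train
python main.py predict --path ~/images/intersection.jpg
python main.py mine --path ~/images/intersection.jpg --use-predicted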
Example #4
    if data["training-file"] == "stdin":
        tdata = input()
    else:
        with open(data["training-file"], "r") as f:
            tdata = f.read()

    model = ai.train(tdata, int(data["max-history"]))
    if data["model-file"] == "stdout":
        print(json.dumps(model))
    else:
        with open(data["model-file"], "w") as f:
            f.write(json.dumps([model, tdata]))

elif sys.argv[1] == "predict":
    if data["model-file"] == "stdin":
        model = input()
    else:
        with open(data["model-file"], "r") as f:
            model = json.loads(f.read())[0]

    size = 8

    output = ai.predict("more in sorrow than in anger."[:size],
                        int(data["length"]), model)

    if data["output-file"] == "stdout":
        print(output)
    else:
        with open(data["output-file"], "w") as f:
            f.write(output)
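For reference, a minimal data settings dict this snippet expects; the keys come from the code above, the values are hypothetical:

data = {
    "training-file": "stdin",           # or a path to a text corpus
    "max-history": "8",                 # context window passed to ai.train
    "model-file": "models/demo.model",  # hypothetical path
    "length": "500",                    # characters to generate
    "output-file": "stdout",
}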
Example #5
import os

import cv2

import ai  # project-local module; assumed to provide create_model() and predict()


def video_processing(video_path, background):
    # OpenCV DNN face detector (Caffe SSD trained on 300x300 inputs).
    face_mask_recognition_model = cv2.dnn.readNet(
        'models/face_mask_recognition.prototxt',
        'models/face_mask_recognition.caffemodel')

    mask_detector_model = ai.create_model()

    cap = cv2.VideoCapture(video_path)
    # The mp4v fourcc pairs with an .mp4 container.
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    now_frame = 1

    if not os.path.exists('outputs'):
        os.mkdir('outputs')

    out = None

    colors = [(0, 255, 0), (0, 0, 255)]
    labels = ['with_mask', 'without_mask']

    # Process the video one frame at a time.
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            break

        height, width = image.shape[:2]

        blob = cv2.dnn.blobFromImage(image,
                                     scalefactor=1.,
                                     size=(300, 300),
                                     mean=(104., 177., 123.))
        face_mask_recognition_model.setInput(blob)
        face_locations = face_mask_recognition_model.forward()

        result_image = image.copy()

        # Keep only detections above the confidence threshold.
        for i in range(face_locations.shape[2]):
            confidence = face_locations[0, 0, i, 2]
            if confidence < 0.5:
                continue

            # Scale the normalized box coordinates back to pixels.
            left = int(face_locations[0, 0, i, 3] * width)
            top = int(face_locations[0, 0, i, 4] * height)
            right = int(face_locations[0, 0, i, 5] * width)
            bottom = int(face_locations[0, 0, i, 6] * height)

            # Crop the face and match the mask detector's expected input.
            face_image = image[top:bottom, left:right]
            face_image = cv2.resize(face_image, dsize=(224, 224))
            face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)

            predict = ai.predict(mask_detector_model, face_image)

            cv2.rectangle(result_image,
                          pt1=(left, top),
                          pt2=(right, bottom),
                          thickness=2,
                          color=colors[predict],
                          lineType=cv2.LINE_AA)

            cv2.putText(result_image,
                        text=labels[predict],
                        org=(left, top - 10),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.8,
                        color=colors[predict],
                        thickness=2,
                        lineType=cv2.LINE_AA)

        # Lazily open the writer on the first frame, then write every frame.
        if out is None:
            out = cv2.VideoWriter('outputs/output.mp4', fourcc,
                                  cap.get(cv2.CAP_PROP_FPS),
                                  (image.shape[1], image.shape[0]))
        out.write(result_image)

        # Progress indicator, e.g. "(10/400): 2%".
        print(f'({now_frame}/{frame_count}): {now_frame * 100 // frame_count}%')
        now_frame += 1

        # Show the annotated frame unless running headless.
        if not background:
            cv2.imshow('result', result_image)
            if cv2.waitKey(1) == ord('q'):
                break

    if out is not None:
        out.release()
    cap.release()
    if not background:
        cv2.destroyAllWindows()
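A minimal call, with a hypothetical input path; passing background=True skips the preview window:

video_processing('inputs/sample.mp4', background=True)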
Example #6
import time
import ai
import grapher
import stats
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# Command-line handling and the packet-capture pipeline, kept for reference:
#parser = argparse.ArgumentParser()
#group = parser.add_mutually_exclusive_group()
#group.add_argument('-v', help="Verbose mode: print all activity on the command line", action="store_true")
#group.add_argument('-q', help="Quiet mode: run program as a daemon", action="store_true")
#parser.add_argument('-g', help="Display a time series or bar graph")
#parser.parse_args()

#print("Starting packet sniffer")
#s.start_sniffer()
#print("Capturing packets for 30s")
#time.sleep(30)
#print("Stopping sniffer")
#s.stop_sniffer()
#print("Dumping file for analysis")
#s.analysis_output()

print("Loading AI model")
mod = ai.model("ai_model.h5")
dataset = ai.dataset("analysis_output_2.csv")
dataformat = ai.dataformat(dataset)

print("Predicting results")
results = ai.predict(dataformat[0], mod)
#print(results.shape)

# Report statistics on the formatted input (conf is defined in the local stats module).
stats.conf(dataformat[0])
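The ai helpers used here are project-local and not shown; a plausible sketch of the interface they imply, assuming a Keras model file and a CSV feature table (the function bodies, column layout, and libraries are all assumptions):

import pandas as pd
from tensorflow import keras

def model(path):
    # Load a trained Keras model from an HDF5 file.
    return keras.models.load_model(path)

def dataset(path):
    # Read the captured-traffic features from CSV.
    return pd.read_csv(path)

def dataformat(df):
    # Hypothetical split: all columns but the last are features.
    return df.values[:, :-1].astype('float32'), df.values[:, -1]

def predict(x, mod):
    # Batch predictions for the formatted feature matrix.
    return mod.predict(x)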