def merge_folds(name, output_name):
    te_files = [
        output_dir + name + 'fold' + str(i) + '.npy' for i in range(10)
    ]
    pred = 0
    for file in te_files:
        pred += np.load(file)
    pred /= 10
    get_result(pred, output_name + '.txt')
Example #2
def run(work_path):
    # System initialization; the parameter must match the verification value entered when the skill was created
    hilens.init("driving")

    # Initialize the built-in camera and the HDMI display.
    # In HiLens Studio, VideoCapture with no argument reads the test/camera0.mp4 file by default;
    # on the HiLens Kit, no argument means the local camera is read.
    camera = hilens.VideoCapture()
    display = hilens.Display(hilens.HDMI)

    # Initialize the model
    model_path = os.path.join(work_path, 'model/yolo3.om')
    driving_model = hilens.Model(model_path)

    frame_index = 0
    json_bbox_list = []
    json_data = {'info': 'det_result'}

    while True:
        frame_index += 1
        try:
            time_start = time.time()

            # 1. Device access #####
            input_yuv = camera.read()  # read one frame (YUV NV21 format)

            # 2. Data preprocessing #####
            img_bgr = cv2.cvtColor(input_yuv,
                                   cv2.COLOR_YUV2BGR_NV21)  # convert to BGR format
            img_preprocess, img_w, img_h = preprocess(img_bgr)  # resize to the model input size

            # 3. Model inference #####
            output = driving_model.infer([img_preprocess.flatten()])

            # 4. Get the detection results #####
            bboxes = get_result(output, img_w, img_h)

            # 5-1. [For competition submission] Write the results to a JSON file #####
            if len(bboxes) > 0:
                json_bbox = convert_to_json(bboxes, frame_index)
                json_bbox_list.append(json_bbox)

            # 5-2. [For debugging] Output the results to the simulator #####
            img_bgr = draw_boxes(img_bgr, bboxes)  # draw boxes on the image
            output_yuv = hilens.cvt_color(img_bgr, hilens.BGR2YUV_NV21)
            display.show(output_yuv)  # show on the screen
            time_frame = 1000 * (time.time() - time_start)
            hilens.info('----- time_frame = %.2fms -----' % time_frame)

        except RuntimeError:
            print('last frame')
            break

    # Save the detection results
    hilens.info('write json result to file')
    result_filename = './result.json'
    json_data['result'] = json_bbox_list
    save_json_to_file(json_data, result_filename)

    hilens.terminate()
Example #3
def parse_sentence_xml(xml):
    print("Creating request")
    body = rqe_parse_sentence_xml.format(xml=xml).encode("utf-8")
    print("Request sent to server")
    response = requests.post(wsdl_file, data=body, headers=headers)
    print("Response received")
    return get_result(response)
Example #4
def chunck_text(text):
    print("Creating request")
    body = rqe_chunck_text.format(text=text).encode("utf-8")
    print("Request sent to server")
    response = requests.post(wsdl_file, data=body, headers=headers)
    print("Response received")
    return get_result(response)
Example #5
 def test_no_score_in_table_row(self):
     """
     Without a score in the table row, the result should be False.
     """
     test_row = TableRow(point=0, coefficient=6)
     table_rows = [test_row]
     self.assertEqual(utils.get_result(table_rows), False)
Example #6
def parse_pos_tagget_text(text):
    print("Creating request")
    body = rqt_parse_pos_tagged_text.format(xml=text).encode("utf-8")
    print("Request sent to server")
    response = requests.post(wsdl_file, data=body, headers=headers)
    print("Response received")
    return get_result(response)
Example #7
 def test_no_score_in_table_row(self):
     """
     Without a score in the table row, the result should be False.
     """
     test_row = TableRow(point=0, coefficient=6)
     table_rows = [test_row]
     self.assertEqual(utils.get_result(table_rows), False)
Example #8
def parse_sentence_text(text):
    print("Creating request")
    body = rqe_parse_sentence_text.format(text=text)
    print("Request sent to server")
    response = requests.post(wsdl_file, data=body, headers=headers)
    print("Response received")
    return get_result(response)
Example #9
def test_function_column_name(completer):
    for l in range(
            len('SELECT * FROM Functions WHERE function:'),
            len('SELECT * FROM Functions WHERE function:text') + 1
    ):
        assert [] == get_result(
            completer, 'SELECT * FROM Functions WHERE function:text'[:l]
        )
def test_columns_before_keywords(completer):
    text = 'SELECT * FROM orders WHERE s'
    completions = get_result(completer, text)

    col = column('status', -1)
    kw = keyword('SELECT', -1)

    assert completions.index(col) < completions.index(kw)
def solve_links(text, language="ro"):
    validate_language(language)
    print("Creating request")
    body = rqt_solve_links.format(text=text, language=language).encode("utf-8")
    print("Request sent to server")
    response = requests.post(wsdl_file, data=body, headers=headers)
    print("Response received")
    return get_result(response)
Example #13
def test_columns_before_keywords(completer):
    text = 'SELECT * FROM orders WHERE s'
    completions = get_result(completer, text)

    col = column('status', -1)
    kw = keyword('SELECT', -1)

    assert completions.index(col) < completions.index(kw)
Example #14
def test():
    '''Test page view.'''
    if request.is_xhr:
        answers = json.loads(request.values.get('answers'))
        result = get_result(answers)
        flash('Test complete. Your personality analysis result is type {}.'.format(result))
        return result.lower()
    random.shuffle(QUESTIONS)
    return render_template('mbti/test.html', questions=QUESTIONS)
Example #15
def parse_text(text):
    try:
        text = escape(text)
        print("Creating request")
        body = rqt_parse_text.format(text=text).encode("utf-8")
        print("Request sent to server")
        response = requests.post(wsdl_file, data=body, headers=headers)
        print("Response received")
        return get_result(response)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("Error at calling fdg_parser. Reason:", str(e), "File:", fname,
              "Line:", exc_tb.tb_lineno)
    def Inference(self, input_image):
        if not isinstance(input_image, np.ndarray):
            return False
        strattime = time.time()
        # Image PreProcess
        resized_image = cv2.resize(input_image, (self.width, self.height))
        print('[0] resize cost: ' + str((time.time() - strattime) * 1000) +
              'ms')
        strattime = time.time()
        inputImageTensor = hiai.NNTensor(resized_image)
        print('[1] input image cost: ' +
              str((time.time() - strattime) * 1000) + 'ms')
        strattime = time.time()
        nntensorList = hiai.NNTensorList(inputImageTensor)
        print('[2] get list cost: ' + str((time.time() - strattime) * 1000) +
              'ms')
        strattime = time.time()
        # Call the inference interface
        resultList = self.model.Inference(self.graph, nntensorList)
        print('[3] get result list cost: ' +
              str((time.time() - strattime) * 1000) + 'ms')
        strattime = time.time()
        if resultList is not None:
            bboxes = utils.get_result(resultList, self.width,
                                      self.height)  # get the detection results
            # print("bboxes:", bboxes)
            print('[4] get box cost: ' +
                  str((time.time() - strattime) * 1000) + 'ms')
            strattime = time.time()
            # Yolov_resnet18 Inference
            output_image = utils.draw_boxes(resized_image, bboxes)  # draw boxes on the image
            print('[5] draw box cost: ' +
                  str((time.time() - strattime) * 1000) + 'ms')
            strattime = time.time()
            output_image = cv2.resize(
                output_image, (input_image.shape[1], input_image.shape[0]))
            print('[6] resize cost: ' + str((time.time() - strattime) * 1000) +
                  'ms')
            strattime = time.time()
            img_name = datetime.datetime.now().strftime("%Y-%m-%d%H-%M-%S-%f")
            cv2.imwrite('output_image/' + str(img_name) + '.jpg', output_image)
            print('[7] write cost: ' + str((time.time() - strattime) * 1000) +
                  'ms')
            strattime = time.time()

        else:
            print('no person in this frame.')
            return False

        return True
Example #17
def home():
    # Create form
    form = ReusableForm(request.form)

    # On form entry and all conditions met
    if request.method == 'POST' and form.validate():
        question1 = request.form['question1']
        question2 = request.form['question2']
        return render_template('result.html',
                               input=get_result(model=model,
                                                question1=question1,
                                                question2=question2,
                                                stop_words=stop_words,
                                                vocab=vocab))
    # Send template information to index.html
    return render_template('index.html', form=form)
Example #18
def run(input_data, output_data):
    """
    Face recognition algorithm

    :param input_data: iterable of input file paths the function will read.
                In this case each is an image (.jpg, .jpeg, .png)
    :param output_data: none
    :return: a list with the probabilities
    """
    for input_file in input_data:

        # Disable scientific notation for clarity
        np.set_printoptions(suppress=True)

        # Load the model
        model = tensorflow.keras.models.load_model('keras_model.h5')

        # Create the array of the right shape to feed into the keras model
        # The 'length' or number of images you can put into the array is
        # determined by the first position in the shape tuple, in this case 1.
        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

        # Open the current input image
        image = Image.open(input_file)

        #resize the image to a 224x224 with the same strategy as in TM2:
        #resizing the image to be at least 224x224 and then cropping from the center
        size = (224, 224)
        image = ImageOps.fit(image, size, Image.ANTIALIAS)

        #turn the image into a numpy array
        image_array = np.asarray(image)

        # display the resized image
        #image.show()

        # Normalize the image
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

        # Load the image into the array
        data[0] = normalized_image_array

        # run the inference
        prediction = model.predict(data)

        return get_result(prediction.tolist()[0])
Example #19
  def get_usn(self, usn):
    self.usn = usn
    if len(usn) == 10:
      html = get_result(usn)
      valid = html.xpath('//td[@width="513"]/text()')
      if 'not' in valid:
        print "Invalid USN"
      else:
        subjects = html.xpath('//i/text()')
        marks = html.xpath('//td[@width=60][@align="center"]/text()')
        student_details = html.xpath('//b/text()')
        t_marks = html.xpath('//td/text()')
        if 'not' in t_marks[35]:
          print "Invalid USN"
        else:
          total_marks = int(t_marks[61])
          student_name = student_details[0]
          semester = int(student_details[2])
          result = student_details[3].encode('ascii','ignore')
          print "******************************************************************"
          print "Name : "+student_name
          print "Semester: ",semester
          print "Marks format is : Subject Name / Internal / External / Total"
          if  subjects:
              internal = marks[5::3]
              external = marks[4::3]
              total = marks[6::3]     
              for i,j,k,l in zip(subjects, internal, external, total):
                  print i,j,k,l
              print "Total Marks = ",total_marks
              if semester == 8:
                  print "Average = ",round(float(total_marks * 100)/750, 2)
              elif semester == 1 or semester == 2:
                  print "Average = ",round(float(total_marks * 100)/775, 2)
              else:
                  print "Average = ",round(float(total_marks * 100)/900, 2)
              print "Result = "+result[8:]
              print "complete result obtained"
              print "******************************************************************"

    else:
      print "******************************************************************"
      return "Invalid USN"
Example #20
def main(benchmark, force_download, overwrite):

    config_path = os.path.join('benchmarks', benchmark, 'config.yml')
    config = resolve_config(config_path)
    source_folder = config.sources.root

    videos = scan_videos(source_folder, '**')

    if len(videos) == 0 or force_download:

        download_dataset(source_folder, url='https://winnowpre.s3.amazonaws.com/augmented_dataset.tar.xz')

        videos = scan_videos(source_folder, '**')

        print(f'Videos found after download:{len(videos)}')

    if len(videos) > 0:

        print('Video files found. Checking for existing signatures...')

        signatures_path = os.path.join(
                                    config.repr.directory,
                                    'video_signatures', '**',
                                    '**.npy')

        signatures = glob(os.path.join(signatures_path), recursive=True)

        if len(signatures) == 0 or overwrite:

            # Load signatures and labels
            #
            command = f'python extract_features.py -cp {config_path}'
            command = shlex.split(command)
            subprocess.run(command, check=True)

        # Check if signatures were generated properly
        signatures = glob(os.path.join(signatures_path), recursive=True)

        assert len(signatures) > 0, 'No signature files were found.'

        available_df = pd.read_csv(
                                os.path.join(
                                            'benchmarks',
                                            benchmark,
                                            'labels.csv'))
        frame_level = glob(
                        os.path.join(
                                    config.repr.directory,
                                    'frame_level', '**',
                                    '**.npy'), recursive=True)

        signatures_permutations = get_frame_sampling_permutations(
                                                        list(range(1, 6)),
                                                        frame_level)

        scoreboard = dict()

        for fs, sigs in signatures_permutations.items():

            results_analysis = dict()

            for r in np.linspace(0.1, 0.25, num=10):

                results = []

                for i in range(5):

                    mAP, pr_curve = get_result(
                                            available_df,
                                            sigs,
                                            ratio=r,
                                            file_index=frame_level)
                    results.append(mAP)

                results_analysis[r] = results

            scoreboard[fs] = results_analysis

        results_file = open('benchmarks/scoreboard.json', "w")
        json.dump(scoreboard, results_file)
        print('Saved scoreboard on {}'.format('benchmarks/scoreboard.json'))

    else:

        print(f'Please review the dataset (@ {source_folder})')
think there is a way to increase the number of people buying products right after seeing them in an online clip –
with shoppable videos. They let you click on something you see in the video – say you like the dress someone’s
wearing. A pop-up window with product information opens and you can buy it on the spot. In May, Instagram began
testing shoppable videos with some brands, apparently with encouraging results in February 2018."""

if __name__ == '__main__':
    ner = ner_core.NerCore(sentence)

    names = ner.extract_names("eng")
    locations = ner.extract_location("eng")
    dates = ner.extract_date_time()

    print("==============================")
    print("Result data extract (English)")
    print("========================")
    print("Name")
    print("====================")
    utils.get_result(names)

    print
    print("========================")
    print("Location")
    print("====================")
    utils.get_result(locations)

    print
    print("========================")
    print("Date time")
    print("====================")
    utils.get_result(dates)
Example #22
 def test_proper_working_of_function_at_regular_case(self):
     test_row1 = TableRow(point=85, coefficient=6)
     test_row2 = TableRow(point=50, coefficient=4)
     table_rows = [test_row1, test_row2]
     self.assertEqual(utils.get_result(table_rows),
                      (85 * 6 + 50 * 4) / (6 + 4))
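For context, a minimal sketch of a get_result that would satisfy the TableRow tests shown in these examples; it is inferred from the assertions above and is an assumption, not the project's actual utils.get_result:

# Hypothetical sketch (assumed from the tests, not the real implementation):
# weighted average of points, returning False for empty input, a missing score,
# or a non-positive coefficient.
def get_result(table_rows):
    rows = list(table_rows)
    if not rows:
        return False
    if any(not row.point or row.coefficient <= 0 for row in rows):
        return False
    weighted_sum = sum(row.point * row.coefficient for row in rows)
    return weighted_sum / sum(row.coefficient for row in rows)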
Example #23
            X_dc, X_test_dc, _ = data_collaboration(Div_data, args)

            assert len(X_dc) == len(X_all)

            dc_model = GlobalModel(args, X_dc, num_class).set_model()

            if args.model == 'knn' or args.model == 'svm':
                dc_model.fit(X_dc, label_all)
                dc = dc_model.score(X_test_dc, label_test)
            else:
                dc_model.fit(X_dc,
                             label_all,
                             batch_size=args.batch_size,
                             epochs=args.nround,
                             verbose=0)
                dc = dc_model.evaluate(X_test_dc, label_test)[1]

            acc_dc[r, i] = dc

            end = time.time()
            time_dc[r, i] = end - start

    get_result(ndat_list,
               acc_cntr,
               acc_ind,
               acc_dc,
               time_dc,
               args,
               method='dc',
               setting='ndat')
Example #24
            assert len(X_dc) == len(X_all)

            dc_model = GlobalModel(args, X_dc, num_class).set_model()

            if args.model == 'knn' or args.model == 'svm':
                dc_model.fit(X_dc, label_all)
                dc = dc_model.score(X_test_dc, label_test)
            else:
                dc_model.fit(X_dc,
                             label_all,
                             batch_size=args.batch_size,
                             epochs=args.nround,
                             verbose=0)
                dc = dc_model.evaluate(X_test_dc, label_test)[1]

            acc_dc[r, i] = dc

            end = time.time()
            time_dc[r, i] = end - start

    xval = np.arange(1, args.num_users + 1)
    get_result(xval,
               acc_cntr,
               acc_ind,
               acc_dc,
               time_dc,
               args,
               method='dc',
               setting='users')
def test_drop_alter_function(completer, action):
    assert get_result(completer, action + ' FUNCTION set_ret') == [
        function('set_returning_func', -len('set_ret'))
    ]
def test_builtin_function_matches_only_at_start(completer):
    text = 'SELECT IN'

    result = [c.text for c in get_result(completer, text)]

    assert 'MIN' not in result
Example #27
 def test_coefficient_less_than_zero(self):
     test_row = TableRow(point=0, coefficient=-6)
     table_rows = [test_row]
     self.assertEqual(utils.get_result(table_rows), False)
Example #28
                    # get each trained weight
                    user_w_list.append(local_model.get_weights())
                    # len(Div_data[user]['X']) == ndat
                    user_ndat_list.append(len(X_train[user_list[c]]))

                # calculate FedAvg
                new_w = fed_avg(user_w_list, user_ndat_list)
                # set new weight to a global model
                fl_model.set_weights(new_w)

            fed = fl_model.evaluate(X_test, label_test)[1]

            end = time.time()

            acc_fed[r, i] = fed
            time_fed[r, i] = end - start

    # print('Time for computation: ', end_time - start_time)
    centr = np.round(np.mean(acc_cntr, 0), decimals=3)
    ind = np.round(np.mean(acc_ind, 0), decimals=3)
    fed = np.round(np.mean(acc_fed, 0), decimals=3)

    get_result(ndat_list,
               acc_cntr,
               acc_ind,
               acc_fed,
               time_fed,
               args,
               method='fed',
               setting='ndat')
Example #29
 def test_no_table_rows(self):
     """
     Without any table rows, there shouldn't be any result.
     """
     table_rows = TableRow.objects.all()
     self.assertEqual(utils.get_result(table_rows), False)
Example #30
    def get_group_usn(self):

        number = input("Enter the total number of USN, for which you want to get result : ")
        text_file = open("results.txt", "w")

        for i in range(number):
            usn = raw_input("Enter {} USN : ".format(i + 1))
            subjects = []
            s_data = []
            marks = []
            marks_variables = []
            variable = []
            valid = []
            text = []
            self.usn = usn

            if len(usn) == 10:
                # fetch the HTML of the requested USN's result
                soup = get_result(usn)
                validness = soup.find_all("td", {"width": "513"})
                for i in validness:
                    valid.append(i.text)
                valid = valid[0].split()

                if "not" in valid:
                    print "Invalid USN"
                    print "-------------------------------------------------"

                else:
                    # finding all the subjects
                    subject = soup.find_all("i")
                    for i in subject:
                        subjects.append(i.text)
                    # finding the student data
                    student_data = soup.find_all("b")
                    for i in student_data:
                        s_data.append(i.text)
                    # finding the student's marks
                    mark = soup.find_all("td", {"align": "center"})
                    for i in mark:
                        marks.append(i.text)

                    try:
                        # total_marks = int(marks_variables[97])
                        sem = int(s_data[4])
                        # print "Total Marks : ", total_marks
                        # text.append("Total Marks : "+marks_variables[97])

                        if sem == 8:
                            external = marks[4:28:4]
                            internal = marks[5:28:4]
                            total = marks[6:28:4]
                            status = marks[7:28:4]

                            variables = soup.find_all("td")
                            for i in variables:
                                marks_variables.append(i.text)

                            print "-------------------------------------------------"
                            print "ta-da!\n"
                            print "Name : " + s_data[2]
                            text.append("Name : " + s_data[2])
                            print s_data[3] + " " + s_data[4] + "\n"
                            text.append(s_data[3] + " " + s_data[4])
                            print "{0:57s} {1:10s} {2:12s} {3:12s} {4:30s}".format(
                                "Subjects", "External", "Internal", "Total", "Status"
                            )
                            text.append(
                                "{0:57s} {1:10s} {2:12s} {3:12s} {4:30s}".format(
                                    "Subjects", "External", "Internal", "Total", "Status"
                                )
                            )

                            for i, j, k, l, m in zip(subjects, external, internal, total, status):
                                text.append("{0:60s} {1:10s} {2:11s} {3:12s} {4:13s}".format(i, j, k, l, m))
                                print "{0:60s} {1:10s} {2:11s} {3:12s} {4:13s}".format(i, j, k, l, m)

                            print "\n"
                            total_m = 0
                            for i in range(len(total)):
                                total_m = total_m + int(str(total[i].encode("ascii", "ignore")))
                            text.append("Total Marks : " + str(total_m))
                            text.append("Average : " + str(round(float(total_m * 100) / 750, 2)))
                            print "Average : " + str(round(float(total_m * 100) / 750, 2))

                        elif sem == 1 or sem == 2:
                            external = marks[4:36:4]
                            internal = marks[5:36:4]
                            total = marks[6:36:4]
                            status = marks[7:36:4]

                            variables = soup.find_all("td")
                            for i in variables:
                                marks_variables.append(i.text)
                            print "-------------------------------------------------"
                            print "ta-da!\n"
                            print "Name : " + s_data[2]
                            text.append("Name : " + s_data[2])
                            print s_data[3] + " " + s_data[4] + "\n"
                            text.append(s_data[3] + " " + s_data[4])
                            print "{0:57s} {1:10s} {2:12s} {3:12s} {4:30s}".format(
                                "Subjects", "External", "Internal", "Total", "Status"
                            )
                            text.append(
                                "{0:57s} {1:10s} {2:12s} {3:12s} {4:30s}".format(
                                    "Subjects", "External", "Internal", "Total", "Status"
                                )
                            )

                            for i, j, k, l, m in zip(subjects, external, internal, total, status):
                                text.append("{0:60s} {1:10s} {2:11s} {3:12s} {4:13s}".format(i, j, k, l, m))
                                print "{0:60s} {1:10s} {2:11s} {3:12s} {4:13s}".format(i, j, k, l, m)

                            print "\n"
                            total_m = 0
                            for i in range(len(total) - 1):
                                total_m = total_m + int(str(total[i].encode("ascii", "ignore")))
                            text.append("Total Marks : " + str(total_m))
                            text.append("Average : " + str(round(float(total_m * 100) / 775, 2)))
                            print "Average : ", round(float(total_m * 100) / 775, 2)

                        else:
                            external = marks[4:36:4]
                            internal = marks[5:36:4]
                            total = marks[6:36:4]
                            status = marks[7:36:4]

                            variables = soup.find_all("td")
                            for i in variables:
                                marks_variables.append(i.text)

                            print "-------------------------------------------------"
                            print "ta-da!\n"
                            print "Name : " + s_data[2]
                            text.append("Name : " + s_data[2])
                            print s_data[3] + " " + s_data[4] + "\n"
                            text.append(s_data[3] + " " + s_data[4])
                            print "{0:57s} {1:10s} {2:12s} {3:12s} {4:30s}".format(
                                "Subjects", "External", "Internal", "Total", "Status"
                            )
                            text.append(
                                "{0:57s} {1:10s} {2:12s} {3:12s} {4:30s}".format(
                                    "Subjects", "External", "Internal", "Total", "Status"
                                )
                            )

                            for i, j, k, l, m in zip(subjects, external, internal, total, status):
                                text.append("{0:60s} {1:10s} {2:11s} {3:12s} {4:13s}".format(i, j, k, l, m))
                                print "{0:60s} {1:10s} {2:11s} {3:12s} {4:13s}".format(i, j, k, l, m)

                            print "\n"
                            total_m = 0

                            for i in range(len(total)):
                                total_m = total_m + int(str(total[i].encode("ascii", "ignore")))
                            text.append("Total Marks : " + str(total_m))
                            text.append("Average : " + str(round(float(total_m * 100) / 900, 2)))
                            print "Average : ", round(float(total_m * 100) / 900, 2)

                        res = s_data[5].split()[1:]
                        text.append("Result : " + " ".join(res))
                        print "Result : " + " ".join(res)
                        text.append("Congratulations!")
                        print "Congratulations!"
                        print "Bye " + s_data[2] + ", see you later!"
                        for i in text:
                            text_file.write(i + "\n")
                        text_file.write("---------------------\n\n")
                        print "-------------------------------------------------"

                    except (ValueError, IndexError):
                        print "Some Error Occurred"

            else:
                print "Invalid USN"
                print "-------------------------------------------------"
Example #31
def run(work_path):

    global data

    # System initialization; the parameter must match the verification value entered when the skill was created
    hilens.init("driving")

    # Initialize the built-in camera and the HDMI display.
    # In HiLens Studio, VideoCapture with no argument reads the test/camera0.mp4 file by default;
    # on the HiLens Kit, no argument means the local camera is read.
    camera = hilens.VideoCapture()

    display = hilens.Display(hilens.HDMI)

    if rec:
        rec_video(camera, display, show)

    # Initialize the model
    # -*- coding: utf-8 -*-
    # model_path = os.path.join(work_path, 'model/yolo3_darknet53_raw3_4_sup_slope_terminal_t.om')
    model_path = os.path.join(work_path, 'model/yolo3_darknet53_raw3_4_sup_slope_now_terminal_t.om')

    driving_model = hilens.Model(model_path)

    frame_index = 0
    json_bbox_list = []
    json_data = {'info': 'det_result'}

    while True:
        frame_index += 1
        try:
            time_start = time.time()

            # 1. Device access #####
            input_yuv = camera.read()  # read one frame (YUV NV21 format)

            # 2. Data preprocessing #####
            if rgb:
                img_rgb = cv2.cvtColor(input_yuv, cv2.COLOR_YUV2RGB_NV21)  # convert to RGB format
            else:
                img_rgb = cv2.cvtColor(input_yuv, cv2.COLOR_YUV2BGR_NV21)  # convert to BGR format

            if pad:
                img_preprocess, img_w, img_h, new_w, new_h, shift_x_ratio, shift_y_ratio = preprocess_with_pad(img_rgb)  # resize to the model input size
                # 3. Model inference #####
                output = driving_model.infer([img_preprocess.flatten()])
                # 4. Get the detection results #####
                bboxes = get_result_with_pad(output, img_w, img_h, new_w, new_h, shift_x_ratio, shift_y_ratio)
            else:
                img_preprocess, img_w, img_h = preprocess(img_rgb)  # resize to the model input size
                # 3. Model inference #####
                output = driving_model.infer([img_preprocess.flatten()])
                # 4. Get the detection results #####
                bboxes = get_result(output, img_w, img_h)

            # # 5-1. [For competition submission] Write the results to a JSON file #####
            # if len(bboxes) > 0:
            #     json_bbox = convert_to_json(bboxes, frame_index)
            #     json_bbox_list.append(json_bbox)
            # # if bboxes != []:
            # #     print()

            if socket_use:
                data = data_generate_4(bboxes)

            # 5-2. [For debugging] Output the results to the display #####
            if show:
                if rgb:
                    img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
                else:
                    img_bgr = img_rgb
                img_bgr, labelName = draw_boxes(img_bgr, bboxes)  # draw boxes on the image
                output_yuv = hilens.cvt_color(img_bgr, hilens.BGR2YUV_NV21)
                display.show(output_yuv)  # show on the screen
            if log:
                time_frame = 1000 * (time.time() - time_start)
                hilens.info('----- time_frame = %.2fms -----' % time_frame)

        except RuntimeError:
            print('last frame')
            break

    # Save the detection results
    hilens.info('write json result to file')
    result_filename = './result.json'
    json_data['result'] = json_bbox_list
    save_json_to_file(json_data, result_filename)
    hilens.terminate()
Example #32
            z_std = z_logvar.mul(0.5).exp_()
            data = data.view(-1, output_size)
            KLD = -0.5 * (1 + z_logvar - z_mu**2 - z_logvar.exp()).sum(-1)

            recon_prob = (get_log_prob(data, x_mu, x_logvar)
                          )  #+ get_log_prob(z, 0,1).sum(-1).unsqueeze(1))

            N_recon = recon_prob[train_idx_normal[idx_b]].sum(-1).exp().mean()
            A_recon = recon_prob[train_idx_anomaly[idx_b]].sum(-1).exp().mean()
            N_z = z[train_idx_normal[idx_b]]
            A_z = z[train_idx_anomaly[idx_b]]

            loss = -(recon_prob.sum(-1)).mean() + KLD.mean()

            true, prec, rec, bestf1_z = get_result(
                -get_log_prob(z, 0, 1).detach().cpu().numpy().sum(-1),
                train_idx_anomaly[idx_b].detach().cpu().numpy())
            true, prec, rec, bestf1_p = get_result(
                -recon_prob.sum(-1).detach().cpu().numpy(),
                train_idx_anomaly[idx_b].detach().cpu().numpy())
            true, prec, rec, bestf1_x = get_result(
                (data - x_mu).abs().sum(-1).detach().cpu().numpy(),
                train_idx_anomaly[idx_b].detach().cpu().numpy())
            writer.add_scalars('loss', {'train loss': loss},
                               e * batch_len + idx_b + 1)

            # writer.add_scalars('N-A ratio', {'train':  N_recon/ A_recon }, e + 1)
            # writer.add_scalars('KL', {'Normal' : get_log_prob(N_z,0,1).sum(-1).exp().mean(),
            #                           'Anomaly': get_log_prob(A_z,0,1).sum(-1).exp().mean()}, e+1)
            writer.add_scalars('Best f1-score',
                               {'train - z likelihood': bestf1_z},
Example #33
 def test_coefficient_less_than_zero(self):
     test_row = TableRow(point=0, coefficient=-6)
     table_rows = [test_row]
     self.assertEqual(utils.get_result(table_rows), False)
Example #34
 def test_proper_working_of_function_at_regular_case(self):
     test_row1 = TableRow(point=85, coefficient=6)
     test_row2 = TableRow(point=50, coefficient=4)
     table_rows = [test_row1, test_row2]
     self.assertEqual(utils.get_result(table_rows), (85*6 + 50*4)/(6+4))
Example #35
 def test_no_table_rows(self):
     """
     Without any table rows, there shouldn't be any result.
     """
     table_rows = TableRow.objects.all()
     self.assertEqual(utils.get_result(table_rows), False)
Example #36
def test_builtin_function_matches_only_at_start(completer):
    text = 'SELECT IN'

    result = [c.text for c in get_result(completer, text)]

    assert 'MIN' not in result
Example #37
                        label_train[user_list[c]],
                        batch_size=args.batch_size,
                        epochs=args.epoch,
                        verbose=0)  # , validation_data=(X_test, label_test)

                    # get each trained weight
                    user_w_list.append(local_model.get_weights())
                    user_ndat_list.append(len(X_train[user_list[c]]))

                # calculate FedAvg
                new_w = fed_avg(user_w_list, user_ndat_list)
                # set new weight to a global model
                fl_model.set_weights(new_w)

            fed = fl_model.evaluate(X_test, label_test)[1]

            end = time.time()

            acc_fed[r, i] = fed
            time_fed[r, i] = end - start

    xval = np.arange(1, args.num_users + 1)
    get_result(xval,
               acc_cntr,
               acc_ind,
               acc_fed,
               time_fed,
               args,
               method='fed',
               setting='users')
Example #38
def test_drop_alter_function(completer, action):
    assert get_result(completer, action + ' FUNCTION set_ret') == [
        function('set_returning_func', -len('set_ret'))
    ]
Example #39
import os

import cv2
from config import CHECKPOINT_PATH
from config import CLASSES
from utils import get_device
from utils import get_net
from utils import get_result

if __name__ == '__main__':
    device = get_device()
    num_classes = len(CLASSES)

    net = get_net('mobilenet_v2', 'val', device, num_classes=num_classes,
                  checkpoint_path=CHECKPOINT_PATH)

    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture(os.path.join('videos', 'video_01.mp4'))

    while True:
        _, frame = cap.read()
        result = get_result(frame, CLASSES, net, device)
        print(f'Result: {result}')

        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()