Example #1
    def collect_gesture(self, capture, ges, photo_num):
        video = False
        predict = False
        count = 0
        # Open the video capture device
        cap = cv2.VideoCapture(capture)
        # Set capture properties (property 10 is CAP_PROP_BRIGHTNESS)
        cap.set(10, 200)
        # Create and initialize the background subtractor
        bgModel = cv2.createBackgroundSubtractorMOG2(0, self.bgSubThreshold)

        while True:
            # Read a video frame
            ret, frame = cap.read()
            # Mirror the frame horizontally
            frame = cv2.flip(frame, 1)

            cv2.imshow('Original', frame)
            # Bilateral filtering
            frame = cv2.bilateralFilter(frame, 5, 50, 100)

            # Draw a rectangle: the first point is the top-left corner (x, y), the second the bottom-right corner
            # rec = cv2.rectangle(frame, (220, 50), (450, 300), (255, 0, 0), 2)
            rec = cv2.rectangle(frame, (self.x1, self.y1), (self.x2, self.y2),
                                (255, 0, 0), 2)

            # Define the ROI region: the first slice is the y range, the second is the x range
            # frame = frame[50:300, 220:450]
            frame = frame[self.y1:self.y2, self.x1:self.x2]

            # Motion detection via background subtraction
            bg = bgModel.apply(frame, learningRate=0)
            # Show the background-subtraction window
            cv2.imshow('bg', bg)
            # Edge clean-up of the mask -- erosion
            fgmask = cv2.erode(bg, self.skinkernel, iterations=1)
            # Show the eroded image
            cv2.imshow('erode', fgmask)
            # AND the original frame with the background-subtracted and eroded mask
            bitwise_and = cv2.bitwise_and(frame, frame, mask=fgmask)
            # Show the masked image
            cv2.imshow('bitwise_and', bitwise_and)
            # Convert to grayscale
            gray = cv2.cvtColor(bitwise_and, cv2.COLOR_BGR2GRAY)
            # Gaussian blur
            blur = cv2.GaussianBlur(gray, (self.blurValue, self.blurValue), 2)
            # cv2.imshow('GaussianBlur', blur)

            # Adaptive threshold segmentation (adaptiveThreshold)
            thresh = cv2.adaptiveThreshold(blur, 255,
                                           cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                           cv2.THRESH_BINARY, 11, 2)
            cv2.imshow('th3', thresh)

            Ges = cv2.resize(thresh, (100, 100))
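            # 100x100 is the input size used both for the saved sample images and for prediction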
            # Image thresholding (using Otsu's method)
            # _, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
            # cv2.imshow('threshold1', thresh)

            if predict:

                # img = cv2.resize(thresh, (100, 100))
                img = np.array(Ges).reshape(-1, 100, 100, 1) / 255
                prediction = p_model.predict(img)
                final_prediction = [result.argmax()
                                    for result in prediction][0]
                ges_type = self.gesture[final_prediction]
                print(ges_type)
                cv2.putText(rec,
                            ges_type, (self.x1, self.y1),
                            fontFace=cv2.FONT_HERSHEY_COMPLEX,
                            fontScale=2,
                            thickness=3,
                            color=(0, 0, 255))
                # cv2.putText(rec, ges_type, (150, 220), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=1, thickness=3, color=(0, 0, 255))

            cv2.imshow('Original', rec)
            if video and count < photo_num:
                # Record the training set
                cv2.imencode(
                    '.jpg',
                    Ges)[1].tofile(self.train_path + '{}_{}.jpg'.format(
                        str(random.randrange(1000, 100000)), str(ges)))
                count += 1
                print(count)
            elif count == photo_num:
                print('{} training-set images recorded; recording the test set for '
                      'this gesture in 3 seconds, {} images in total'.format(
                          photo_num, photo_num * 0.43 - 1))
                time.sleep(3)
                count += 1
            elif video and photo_num < count < photo_num * 1.43:
                cv2.imencode(
                    '.jpg',
                    Ges)[1].tofile(self.predict_path + '{}_{}.jpg'.format(
                        str(random.randrange(1000, 100000)), str(ges)))
                count += 1
                print(count)
            elif video and count == photo_num * 1.43:
                video = False
                ges += 1
                print("Recording of this gesture is complete; press 'l' to record the "
                      "next gesture, or 't' to stop recording and start training")

            k = cv2.waitKey(1)
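            # Key bindings: Esc quits, 'l' records a gesture, 'p' loads a model for live
            # prediction, 'r' resets the background model, 't' trains on the recorded images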
            if k == 27:
                break

            elif k == ord('l'):  # record a gesture
                video = True
                count = 0

            elif k == ord('p'):  # predict gestures
                predict = True
                while True:
                    model_name = input('Enter the model name\n')
                    if model_name == 'exit':
                        break
                    if model_name in os.listdir('./'):
                        print('Loading model {}'.format(model_name))
                        p_model = load_model(model_name)
                        break
                    else:
                        print("Model name not found; please re-enter it, or type 'exit' to quit")

            elif k == ord('r'):
                bgModel = cv2.createBackgroundSubtractorMOG2(
                    0, self.bgSubThreshold)
                print('Background reset complete')

            elif k == ord('t'):
                os.environ["CUDA_VISIBLE_DEVICES"] = "0"
                train = Training(batch_size=32,
                                 epochs=5,
                                 categories=len(self.gesture),
                                 train_folder=self.train_path,
                                 test_folder=self.predict_path,
                                 model_name=p_model)
                train.train()
                backend.clear_session()
Example #2
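## -- Build the output directory from the configured output path, the HEP data name and the date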
output = "{0}/{1}".format(config.output_path, hep_data_name)
output += "/training-{0}/".format(date)
dnn.output_dir = output

vb.INFO("RUN :  Saving output to {0}".format(output))
if not os.path.isdir(output):
    vb.WARNING("RUN : '{0}' does not exist ".format(output))
    vb.WARNING("RUN :       Creating the directory. ")
    os.system('mkdir -p {0}'.format(output))

## -- Copy the configuration file to the output directory
os.system("cp {0} {1}".format(sys.argv[1], output))

## -- Slice rows of the dataframe (remove from training)
##    list of strings with arguments separated by a space
slices = [
    'AK4_deepCSVb >= 0', 'AK4_deepCSVbb >= 0', 'AK4_deepCSVc >= 0',
    'AK4_deepCSVl >= 0'
]  # want all AK4 to have 'good' b-tagging scores
#slices = ['AK4_deepFlavorb >= 0','AK4_deepFlavorbb >= 0','AK4_deepFlavorc >= 0',
#          'AK4_deepFlavoruds >= 0','AK4_deepFlavorg >= 0','AK4_deepFlavorlepb >= 0'] # want all AK4 to have 'good' b-tagging scores

## Setup
dnn.initialize()
dnn.load_data(['target'])  # load HEP data (add 'target' branch to dataframe)
dnn.preprocess_data(
    slices)  # equal statistics for each class & remove bad rows
dnn.train(ndims=1)  # build and train the model!

## END ##
Example #3
angry_p = Perceptron(angry_weights, 4)
perceptron_array = [happy_p, sad_p, mischievous_p, angry_p]
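# One perceptron per emotion; for each image the most active perceptron is taken as the answer.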

# The training starts.
percentage = 0
# Trains as long as the score is lower than 80%
while percentage < 0.8:
    print("Training...")
    total_training_result = []
    for i in range(len(image_array)):
        for j in range(len(perceptron_array)):
            # Calculates the output for every perceptron on every image, and trains it.
            perceptron_array[j].activate_1(image_array[i])

            session = Training(image_array[i], perceptron_array[j], facit[i])
            session.train()
        # Check which perceptron was the most active.
        winner = get_winner(perceptron_array)
        total_training_result.append(winner)

    # Calculates the percentage of correct answers.
    percentage = calc_points(total_training_result, facit)
    percentage = percentage / len(image_array)

    print("I got %.2f percent correct this training round." %
          (percentage * 100))
    time.sleep(1)

print("Let's do the test!")
print("________________________________")
time.sleep(1)
Example #4
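# The run mode is given on the command line: "train", "onlyexport", "predict" or "predict_server"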
mode = sys.argv[1]
if mode == "train" or mode == "onlyexport":
    data_file = sys.argv[2]
    parameter_file = sys.argv[3]
    examples_file = sys.argv[4]
    with codecs.open(parameter_file, 'r', 'utf-8') as f:
        params = json.load(f)
    with codecs.open(examples_file, 'r', 'utf-8') as f:
        examples = json.load(f)
    t = Training(data_file=data_file, examples=examples)

    for p in params:
        print("Trying:{}".format(p))
        p["only_export"] = (mode == "onlyexport")
        t.train(**p)

if mode == "predict":
    vocab_file = sys.argv[2]
    model_file = sys.argv[3]
    text = sys.argv[4]
    p = Prediction(model_file, vocab_file)
    print(p.predict(text))

if mode == "predict_server":
    data_file = sys.argv[2]
    model_file = sys.argv[3]
    p = Prediction(model_file, data_file)
    db = get_database_from_file(data_file)
    PredictServer(p, db)
Example #5
config['lr'] = 1e-5
config['accumulated'] = 2
fine_tuning = False

zalo = ZaloDatasetProcessor()
zalo.load_from_path(dataset_path='dataset',
                    train_filename='combine.json',
                    test_filename='test.json',
                    dev_filename='dev.json')
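# Tokenize with multilingual BERT and build fixed-length (256-token) features for each split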
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
features_train = zalo.convert_examples_to_features(zalo.train_data,
                                                   zalo.label_list, 256,
                                                   tokenizer)
features_test = zalo.convert_examples_to_features(zalo.test_data,
                                                  zalo.label_list, 256,
                                                  tokenizer)
features_dev = zalo.convert_examples_to_features(zalo.dev_data,
                                                 zalo.label_list, 256,
                                                 tokenizer)
if __name__ == "__main__":

    NUM_OF_INTENT = 2
    config_model = BertConfig.from_pretrained('bert-base-multilingual-cased',
                                              output_hidden_states=True)
    model = QAModel(config_model, NUM_OF_INTENT)
    if fine_tuning:
        model.load_state_dict(torch.load('models/model-squad1.bin'))
    training = Training(features_train, features_dev, model, logger,
                        zalo.label_list, config)
    training.train()
Example #6
def main():
    X = []
    Y = []
    char2intDict = None
    int2charDict = None
    vocabulary = None
    config = FileHelper.load_config('config.json')

    seq_length = config['preprocessing']['sequence_chars_length']

    # Load data or preprocess
    if not config['preprocessing']['exec_preprocessing']:
        X = FileHelper.load_object_from_file(
            config['preprocessing']['checkpoints']['X_file'])
        Y = FileHelper.load_object_from_file(
            config['preprocessing']['checkpoints']['Y_file'])
        char2intDict = FileHelper.load_object_from_file(
            config['preprocessing']['checkpoints']['char2intDict_file'])
        int2charDict = FileHelper.load_object_from_file(
            config['preprocessing']['checkpoints']['int2charDict_file'])
    else:
        preprocessing = Preprocessing(config)
        X, Y, char2intDict, int2charDict = preprocessing.preprocess()
        FileHelper.save_object_to_file(
            config['preprocessing']['checkpoints']['X_file'], X)
        FileHelper.save_object_to_file(
            config['preprocessing']['checkpoints']['Y_file'], Y)

    vocabulary = FileHelper.load_object_from_file(
        config['preprocessing']['checkpoints']['vocabulary_file'])

    # Keep the un-reshaped version of X because it's needed for text generation later
    X_unshaped = X

    # Reshape the data into the format the LSTM expects: [samples, timesteps, features]
    X = numpy.reshape(X, (len(X), seq_length, 1))
    # Normalize/rescale all integers to range 0-1
    X = X / float(len(vocabulary))
    # One-hot encode the categorical output variable (a vector of zeros with a single 1 --> 0..N-1 categories)
    Y = np_utils.to_categorical(Y)

    training = Training(config)
    # Define the model
    model = training.define_model(X, Y)

    if config['training']['exec_training']:
        # Train the model
        model = training.train(X, Y, char2intDict, vocabulary, model)
    else:
        # Just set the previously trained weights for the model
        model.load_weights(config['training']['load_weights_filename'])
        model.compile(loss='categorical_crossentropy', optimizer='adam')

    if config['generation']['exec_generation']:
        # Generate the random seed used as starting value for text generation
        seed = generate_random_seed(X_unshaped)
        generatedText = generate_text(
            config['generation']['text_chars_length'], int2charDict,
            vocabulary, seed, model)

        # Save the generated text to file
        outputFilename = config['generation']['foldername'] + '/' + \
            datetime.datetime.now().strftime('%Y%m%d_%H_%M_%S') + '.txt'
        FileHelper.write_data(outputFilename, generatedText)
Example #7
# for env in envs:
# if 'Box' in str(env.action_space):
#     dqn = DQN(env.observation_space.shape, env.action_space.shape[0])
# else:
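# Run 10 independent trials; each alternates exploration and training and tracks
# the best average score (max_sustained) and the best single score (max_ever).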
for trial in range(10):
    t = Training(env)
    print('-=-°' * 15 + "TRIAL #{}".format(trial) + '-=-°' * 15)

    max_sustained = -1
    max_ever = -1
    for i in range(10):
        # t.test_greedy(True)

        for j in range(25):
            t.explore(5)
            t.train(5)

        a, m = t.avg_max_scores(10)
        if a > max_sustained:
            max_sustained = a
        if m > max_ever:
            max_ever = m
        print("scores: avg {} ; max {}".format(a, m))
        # t.test_greedy(True)
        # b = t.evaluate(dots)
    print('-=-°' * 15 +
          "ACHIEVEMENTS: #{} / {}".format(max_sustained, max_ever) +
          '-=-°' * 15)


def evaluate():
    def repetition(i):
        folder = subfolder + "run_" + str(i) + "/"
        t = Training(year, config, folder, self.debug)
        return t.train()