Example #1
File: main.py Project: queer1/porn_sieve
    def __init__(self):
        super(Window, self).__init__()
        self.site = "xvideos"
        self.xpaths = get_niche_xpaths(self.site)
        self.start_pg = 0
        self.max_pgs = 0
        self.cur_vid = None
        self.cur_img = None
        self.last_pred = None
        self.default_img_flag = True
        self.preview_size = 2**9  # arbitrary number

        self.winlock = RLock()
        self.thr = None
        self.q = PriorityQueue()
        self.db = Database()
        self.scraper = site_selector(self.site)

        self.set_keybindings()

        # Create the entirety of the GUI and
        # link to appropriate functions.
        self.setWindowTitle('P**n!')
        self.layout = QtGui.QHBoxLayout()

        self.init_left_pane()
        self.init_middle_pane()
        self.init_right_pane()

        self.setLayout(self.layout)
        self.show()

        self.predict = Predictor()
Example #2
File: webcam.py Project: AminHP/RTFER-old
def run():
    predictor = Predictor()
    predictor.load_model()

    cv2.namedWindow("Webcam")
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        return

    while True:
        rval, frame = capture.read()
        if not rval:
            break

        image = frame
        # OpenCV captures frames in BGR order, so convert from BGR, not RGB
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        for face_info in predictor.predict(gray_image):
            draw_face_info(image, face_info)
        draw_landmarks(image, get_all_landmarks(gray_image), draw_dots=True, visualize=False)

        cv2.imshow("Webcam", image)
        key = cv2.waitKey(1)
        if key == 27 or key == ord('q'): # exit on ESC or Q
            break

    cv2.destroyWindow("Webcam")
    capture.release()
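
To try this excerpt without the project's trained model, a minimal stand-in for Predictor can look like the sketch below (hypothetical: the real class loads a facial-expression model, and draw_face_info, draw_landmarks, and get_all_landmarks would need similar stubs).

class Predictor:
    def load_model(self):
        # the real implementation loads trained model weights from disk
        pass

    def predict(self, gray_image):
        # the real implementation yields one info object per detected face
        return []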
Example #3
def get_validation_metrics(model: NeuralMachineTranslator,
                           iterations: int,
                           training_evaluator: Evaluator,
                           validation_evaluator: Evaluator,
                           training_iterator: BucketIterator,
                           validation_iterator: BucketIterator) -> Tuple[Metrics, Metrics]:
    # get predictor
    predictor = Predictor(model)

    validation_evaluator.clear_sentences()
    # loop over validation sentences and add predictions to evaluator
    for i in range(iterations):
        validation_batch = next(iter(validation_iterator))
        predicted_sentence, _ = predictor.predict(validation_batch)
        validation_evaluator.add_sentences(validation_batch.trg[0], predicted_sentence, model.EOS)

    # get validation metrics
    validation_metrics = Metrics(validation_evaluator.bleu(), validation_evaluator.ter(), 0)

    training_evaluator.clear_sentences()
    # get 50 batches from training data and add predictions to evaluator
    for i in range(50):
        batch = next(iter(training_iterator))
        predicted_sentence, _ = predictor.predict(batch)
        training_evaluator.add_sentences(batch.trg[0], predicted_sentence, model.EOS)

    # get training metrics
    training_metrics = Metrics(training_evaluator.bleu(), training_evaluator.ter(), 0)

    return validation_metrics, training_metrics
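
Note that next(iter(validation_iterator)) builds a fresh iterator on every pass; unless the iterator shuffles, each pass would yield the same first batch. A sketch of the more conventional pattern, assuming the torchtext BucketIterator API, creates the iterator once:

# create the iterator once and draw successive batches from it
validation_batches = iter(validation_iterator)
for i in range(iterations):
    validation_batch = next(validation_batches)
    predicted_sentence, _ = predictor.predict(validation_batch)
    validation_evaluator.add_sentences(validation_batch.trg[0], predicted_sentence, model.EOS)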
Example #4
File: train.py Project: ShuangPA/edit_bert
    def __init__(self, bert_config_file, is_training, num_labels, train_file,
                 dev_file, vocab_file, output_dir, max_seq_length,
                 learning_rate, batch_size, epochs, warmup_proportion,
                 virtual_batch_size_ratio, evaluate_every, init_ckpt):
        # os.makedirs avoids a shell call and tolerates an existing directory
        os.makedirs(output_dir, exist_ok=True)
        self._data_train = Dataset(train_file, num_labels, vocab_file, True,
                                   output_dir, True, max_seq_length)
        self._dev_data = Dataset(dev_file, num_labels, vocab_file, True,
                                 output_dir, False, max_seq_length)
        num_train_step = int(self._data_train.size / batch_size * epochs)
        num_warmup_step = int(num_train_step * warmup_proportion)

        self._model = Model(bert_config_file, max_seq_length, init_ckpt,
                            is_training, num_labels)

        self._train_op, self._global_step = optimization.create_optimizer(
            self._model.loss, learning_rate, num_train_step, num_warmup_step,
            False, virtual_batch_size_ratio)

        self.batch_size = batch_size
        self.epochs = epochs
        self.evaluate_every = evaluate_every
        self.output_dir = output_dir
        self._predictor = Predictor(bert_config_file, max_seq_length,
                                    num_labels)
Example #5
def main(arguments):
    model_parameters = {
        'scale': arguments['scale'],
        'learning_rate': 1e-5,
        'D': arguments['D'],
        'C': arguments['C'],
        'G': arguments['G'],
        'kernel_size': 3,
        'c_dim': 3,
        'G0': arguments['G0'],
    }

    model = load_model(model_parameters, arguments['vgg'], verbose=arguments['verbose'])

    if arguments['summary'] is True:
        model.rdn.summary()

    if arguments['train'] is True:
        from trainer.train import Trainer

        trainer = Trainer(train_arguments=arguments)
        trainer.train_model(model)

    if arguments['test'] is True:
        from predict import Predictor

        predictor = Predictor(test_arguments=arguments)
        predictor.get_predictions(model)
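
The arguments['...'] lookups imply a parsed-argument mapping (e.g. docopt, or vars() over an argparse namespace). A hypothetical argparse stand-in that produces the keys used above; the flag names and defaults are assumptions:

import argparse

def parse_arguments():
    # hypothetical parser reproducing the dict keys used in main()
    parser = argparse.ArgumentParser()
    parser.add_argument('--scale', type=int, default=2)
    parser.add_argument('--D', type=int, default=20)
    parser.add_argument('--C', type=int, default=6)
    parser.add_argument('--G', type=int, default=64)
    parser.add_argument('--G0', type=int, default=64)
    parser.add_argument('--vgg', default=None)
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--summary', action='store_true')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--test', action='store_true')
    return vars(parser.parse_args())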
Example #6
    def __init__(self, w, h, og_w, custom, obj_name, is_ofcalc=False):
        self.cheat_data = self.read_csv('../detector/data/data.csv')

        self.obj_name = obj_name
        self.c_start = 0
        self.prev_imgs = []

        self.resize_w = w
        self.resize_h = h
        self.multi = w / og_w

        if custom:
            self.start_idx = 1
            self.label_idx = 0
        else:
            self.start_idx = 8
            self.label_idx = 1

        self.is_ofcalc = is_ofcalc

        self.p = Predictor(self.resize_w, self.resize_h,
                           'models/model_detectorv2.h5')

        self.of_w = 376
        self.of_h = 240
        self.skipped_frames = 3

        self.rect_color = (10, 125, 10)
        self.font_size = 1
        self.font_thickness = 1
Example #7
 def status(self):
     # self.DATA is a dataframe
     PreprocessedData = preprocess(self.DATA)
     # the preprocessed data is also a dataframe
     model = Predictor()
     status = model.predict(PreprocessedData)
     return status
Example #8
 def __init__(self):
     """
     constructor
     """
     self.__pre_processor = PreProcessor()
     self.__trainer = Trainer()
     self.__predictor = Predictor()
Example #9
 def allocate(self, points_per_network, W, num_layers, step,
              max_iterations):
     n = randint(self.n1, self.n2)
     # resample until we find an id that is not already in use
     while n in self.predictor_array:
         n = randint(self.n1, self.n2)
     self.predictor_array[n] = Predictor(points_per_network, W, num_layers,
                                         step, max_iterations)
     return n
Example #10

def autonomous_control(model):
    """Run the car autonomously"""
    predictor = Predictor(model)
    with picamera.PiCamera() as camera:
        camera.resolution = configuration.PICAMERA_RESOLUTION
        camera.framerate = configuration.PICAMERA_FRAMERATE
        time.sleep(configuration.PICAMERA_WARM_UP_TIME)
        camera.rotation = 180
        pwm = motor_driver_helper.get_pwm_imstance()
        motor_driver_helper.start_pwm(pwm)
        forward_cycle_count = left_cycle_count = right_cycle_count = 0
        should_brake = False

        while True:
            stream = io.BytesIO()
            camera.capture(stream, format='jpeg', use_video_port=True)
            direction = predictor.predict(stream)
            image_helper.save_image_with_direction(stream, direction)
            stream.flush()
            if direction == 'forward':
                should_brake = True
                left_cycle_count = right_cycle_count = 0
                forward_cycle_count = reduce_speed(pwm, forward_cycle_count)
                motor_driver_helper.set_front_motor_to_idle()
                motor_driver_helper.set_forward_mode()
            elif direction == 'left':
                should_brake = True
                forward_cycle_count = right_cycle_count = 0
                left_cycle_count = increase_speed_on_turn(
                    pwm, left_cycle_count)
                motor_driver_helper.set_left_mode()
                motor_driver_helper.set_forward_mode()
            elif direction == 'right':
                should_brake = True
                forward_cycle_count = left_cycle_count = 0
                right_cycle_count = increase_speed_on_turn(
                    pwm, right_cycle_count)
                motor_driver_helper.set_right_mode()
                motor_driver_helper.set_forward_mode()
            elif direction == 'reverse':
                should_brake = True
                #motor_driver_helper.set_front_motor_to_idle()
                motor_driver_helper.set_right_mode()
                motor_driver_helper.set_reverse_mode()

            else:
                if should_brake:
                    print("braking...")
                    #motor_driver_helper.set_reverse_mode()
                    time.sleep(0.2)
                    should_brake = False
                motor_driver_helper.set_idle_mode()
                forward_cycle_count = left_cycle_count = right_cycle_count = 0
                motor_driver_helper.change_pwm_duty_cycle(pwm, 100)
            print(direction)
Example #11
def test_within_last_year(semester, result):
    course_dict = {}
    course_dict["1234"] = Course(title="title",
                                 semester=semester,
                                 professor="professor",
                                 crn="123456",
                                 status="OPEN")
    predictor = Predictor(course_dict["1234"],
                          [201903, 201902, 201901, 201803], "fall")
    assert predictor.within_last_year(
        course_dict["1234"].instances["123456"]) == result
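
The bare semester/result parameters indicate this test is driven by pytest parametrization; the decorator is simply cut off in the excerpt (the same applies to Example #12 below). A sketch of how the values are likely supplied; the concrete cases here are assumptions:

import pytest

@pytest.mark.parametrize("semester,result", [
    ("fall 2019", True),       # hypothetical cases
    ("spring 2017", False),
])
def test_within_last_year(semester, result):
    ...  # body as in Example #11 above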
Example #12
def test_factor_in_enrollment(status, semester, score):
    course_dict = {}
    course_dict["1234"] = Course(title="title",
                                 semester=semester,
                                 professor="proferssor",
                                 crn="123456",
                                 status=status)
    predictor = Predictor(course_dict["1234"],
                          [201903, 201902, 201901, 201803], "fall")
    predictor.factor_in_enrollment()
    assert predictor.score == score
Example #13
def filter(f1_recoder: dict, cur_model: str, args):
    predictor = Predictor(os.path.join(cur_model, 'model.pt'), args.word_vocab,
                          args.label_vocab, 'data/dev/word.txt',
                          'data/dev/label.txt', args.cuda)
    score = predictor.predict()
    f1_recoder[score] = cur_model
    if len(f1_recoder) > 10:
        # remove the worst model: keys sort descending, so the last is the lowest F1
        key = sorted(f1_recoder.keys(), reverse=True)[-1]
        path = f1_recoder.pop(key)
        shutil.rmtree(path)
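
The same keep-the-ten-best bookkeeping can be written with a min-heap; a sketch of the equivalent logic (a rewrite, not the project's code):

import heapq
import shutil

def keep_best(heap, score, path, limit=10):
    # push the new checkpoint, then evict the smallest F1 (the worst model)
    heapq.heappush(heap, (score, path))
    if len(heap) > limit:
        _, worst_path = heapq.heappop(heap)
        shutil.rmtree(worst_path)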
Example #14
def test_likely_to_be_offered():
    course_dict = {}
    course_dict["1234"] = Course(title="title",
                                 semester="spring 2019",
                                 professor="professor",
                                 crn="123456",
                                 status="OPEN")
    predictor = Predictor(course_dict["1234"],
                          [201903, 201902, 201901, 201803], "summer")
    predictor.score = 3
    assert not predictor.likely_to_be_offered()
Example #15
    def __init__(self):
        self.t_step = 0
        self.max_step = 10
        self.done = False
        self.goal = 0.6

        # 0: do nothing, 1: contrast(1.1), 2: contrast(0.9), 3: saturation(1.1),
        # 4: saturation(0.9), 5: exposure(1.1), 6: exposure(0.9)
        self.t_action = ["0", "1", "2", "3", "4", "5", "6"]
        self.actions = {}

        self.get_reward = Predictor()
Example #16

def upload_file():
    if request.method == 'POST':
        f = request.files['file']
        f.save(os.path.join("./static/files", secure_filename(f.filename)))
        pred = Predictor(
            os.path.join("./static/files", secure_filename(f.filename)),
            os.path.join("./static/output",
                         secure_filename(f.filename))).predictor_func()
        return render_template('complete.html',
                               location=os.path.join(
                                   "./static/output",
                                   secure_filename(f.filename)))
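
This handler is an excerpt; running it requires roughly the following imports and a route registration (assuming Flask and Werkzeug, which the request/secure_filename/render_template calls point to):

import os

from flask import render_template, request
from werkzeug.utils import secure_filename

# the handler above would then be registered on a Flask app, e.g.:
# @app.route('/upload', methods=['GET', 'POST'])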
Example #17
    def post(self):
        sentence = self.get_argument('sentence')
        # lan == '0': English, lan == '1': Chinese
        lan = self.get_argument('language')
        print('sentence passed is %s' % sentence)

        #sentence = '{"doc_label": ["Computer--MachineLearning--DeepLearning", "Neuro--ComputationalNeuro"],"doc_token": ["I", "love", "deep", "learning"],"doc_keyword": ["deep learning"],"doc_topic": ["AI", "Machine learning"]}'
        # default to the English config; use the Chinese one when lan == '1'
        config = Config(config_file='conf/train.json')
        if lan == '1':
            print('train2.json used')
            config = Config(config_file='conf/train2.json')
        predictor = Predictor(config)
        batch_size = config.eval.batch_size
        input_texts = []
        predict_probs = []
        is_multi = config.task_info.label_type == ClassificationType.MULTI_LABEL
        #TODO pass sentence as input_texts
        #for line in codecs.open(sys.argv[2], "r", predictor.dataset.CHARSET):
        #    input_texts.append(line.strip("\n"))
        #    epoches = math.ceil(len(input_texts)/batch_size)
        # for line in iter(sentence, "\n"):
        #     print('current line is %s' %line)
        #     input_texts.append(line.strip("\n"))
        #     epoches = math.ceil(len(input_texts)/batch_size)

        input_texts.append(sentence.strip("\n"))
        epoches = math.ceil(len(input_texts) / batch_size)

        print('input_texts needed to be predicted is %s' % input_texts)

        for i in range(epoches):
            batch_texts = input_texts[i * batch_size:(i + 1) * batch_size]
            predict_prob = predictor.predict(batch_texts)
            # collect predictions from every batch, not just the last one
            for j in predict_prob:
                predict_probs.append(j)

        for predict_prob in predict_probs:
            if not is_multi:
                predict_label_ids = [predict_prob.argmax()]
            else:
                predict_label_ids = []
                predict_label_idx = np.argsort(-predict_prob)
                for j in range(0, config.eval.top_k):
                    if predict_prob[
                            predict_label_idx[j]] > config.eval.threshold:
                        predict_label_ids.append(predict_label_idx[j])
            predict_label_name = [predictor.dataset.id_to_label_map[predict_label_id] \
                    for predict_label_id in predict_label_ids]

        self.write(";".join(predict_label_name) + "\n")
Example #18
def main():
    model = None
    if len(sys.argv) > 1:
        model = sys.argv[1]
        print('model load success')

    predictor = Predictor(model)

    for filename in glob.glob('./test_dataset/*'):
        direction = predictor.predict(filename)
        print(direction)
Example #19
def test_factor_in_multiple_professors():
    course_dict = {}
    course_dict["1234"] = Course(title="title",
                                 semester="spring 2019",
                                 professor="professor",
                                 crn="123456",
                                 status="OPEN")
    course_dict["1234"].add_instance_of_course("spring 2019", "professor2",
                                               "56789", "OPEN")
    predictor = Predictor(course_dict["1234"],
                          [201903, 201902, 201901, 201803], "fall")
    predictor.factor_in_multiple_professors()
    assert predictor.score == 2
Example #20
def test_factor_in_semester_offered_two_semesters():
    course_dict = {}
    course_dict["1234"] = Course(title="title",
                                 semester="spring 2019",
                                 professor="professor",
                                 crn="123456",
                                 status="OPEN")
    course_dict["1234"].add_instance_of_course("fall 2019", "professor",
                                               "56789", "OPEN")
    predictor = Predictor(course_dict["1234"],
                          [201903, 201902, 201901, 201803], "summer")
    predictor.factor_in_semester_offered()
    assert predictor.score == -4
Example #21

    def crossValidate(train_ratio, test_ratio):
        data_train, target_train, data_test, target_test = \
                Dataset.getTrainTestData(EnvConfig.TRAIN_DATA_PATH,
                        train_ratio=train_ratio, test_ratio=test_ratio)

        Trainer.train(data_train, target_train)

        predictor = Predictor()
        predictor.loadModel()
        predicted = predictor.predict(data_test)

        precision = numpy.mean(predicted == target_test)
        return precision
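
Note that numpy.mean(predicted == target_test) is the fraction of correct predictions, i.e. accuracy rather than precision in the usual classification-metrics sense, so the variable name is misleading. The equivalent with scikit-learn, assuming it is available:

from sklearn.metrics import accuracy_score

# same quantity as numpy.mean(predicted == target_test)
accuracy = accuracy_score(target_test, predicted)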
Example #22
def main(_):
    config = Config(config_file='conf/fasttext_token_char.config')
    predictor = Predictor(config)
    predict_probs = []
    standard_labels = []
    logger = util.Logger(config)
    if not os.path.exists(config.eval.eval_dir):
        os.makedirs(config.eval.eval_dir)
    with codecs.open(config.eval.eval_dir + "/predict.txt",
                     "w",
                     encoding=util.CHARSET) as predict_file:
        texts = []
        for line in codecs.open(config.eval.text_file, "r",
                                encoding='gb18030'):
            line = line.strip("\n")
            texts.append(line)
        batch_size = config.eval.batch_size
        epochs = math.ceil(len(texts) / batch_size)

        for i in range(epochs):
            predicts = predictor.predict(texts[i * batch_size:(i + 1) *
                                               batch_size])
            for k in range(len(predicts)):
                predict_result = "Nil\t0"
                predict = predicts[k]
                line = texts[i * batch_size + k]
                if predict is not None:
                    predict_np = np.array(predict[0], dtype=np.float32)
                    predict_label = predictor.data_processor.id_to_label_map[
                        np.argmax(predict_np)]
                    predict_result = "%s\t%f" % (predict_label,
                                                 np.max(predict_np))
                    predict_probs.append(predict[0])
                    standard_labels.append(line.split("\t")[0])
                predict_file.write(predict_result + "\t" + line + "\n")
    evaluator = Evaluator(config.eval.eval_dir)
    multi_label = config.eval.multi_label
    (precision_list, recall_list, fscore_list,
     standard_list) = evaluator.evaluate(predict_probs, standard_labels,
                                         predictor.data_processor.label_map,
                                         config.eval.threshold, multi_label)
    logger.info(
        "Test performance, precision: %f, recall: %f, f1: %f,  standard: %d" %
        (
            precision_list[0][evaluator.MICRO_AVERAGE],
            recall_list[0][evaluator.MICRO_AVERAGE],
            fscore_list[0][evaluator.MICRO_AVERAGE],
            standard_list[0][evaluator.MICRO_AVERAGE],
        ))
    evaluator.save()
Example #23
def main():

    args = config_parser()
    config = MyConfiguration()

    # for reproducibility
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(config.random_seed)
    random.seed(config.random_seed)
    np.random.seed(config.random_seed)

    # model load the pre-trained weight, load ckpt once out of predictor
    model = ESFNet(config=config).to(
        'cuda:{}'.format(args.gpu) if args.gpu >= 0 else 'cpu')
    ckpt = torch.load(
        args.ckpt_path,
        map_location='cuda:{}'.format(args.gpu) if args.gpu >= 0 else 'cpu')
    model.load_state_dict(ckpt['state_dict'])

    # for each high-resolution image: crop -> predict -> merge
    source_image_paths = glob.glob(os.path.join(args.input, '*.png'))
    for source_image in tqdm(source_image_paths):
        # get high-resolution image name
        filename = source_image.split('/')[-1].split('.')[0]
        # cropper get patches and save to --input/patches
        c = Cropper(args=args, configs=config, predict=True)
        _, n_w, n_h, image_h, image_w = c.image_processor(
            image_path=source_image)
        my_dataset = dataset_predict(args=args)
        my_dataloader = data.DataLoader(my_dataset,
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        pin_memory=args.pin_memory,
                                        drop_last=False,
                                        num_workers=args.nb_workers)

        # predict using pre-trained network
        p = Predictor(args=args, model=model, dataloader_predict=my_dataloader)
        p.predict()
        # patches [total_size, C, H, W] p.patches tensor -> reshape -> [total_size, H, W, C]
        patches_tensor = torch.transpose(p.patches, 1, 3)
        patches_tensor = patches_tensor.view(n_h, n_w, config.cropped_size,
                                             config.cropped_size, 3)
        # merge and save the output image
        patches = patches_tensor.cpu().numpy()
        img = unpatchify(patches, image_h, image_w)
        #img = Image.fromarray(img)
        save_path = os.path.join(args.output, 'remerge', filename + '.png')
        cv2.imwrite(save_path, img)
Example #24
def main():
    # training, test, and prediction-result file paths
    train_path = './data/train.csv'
    test_path = './data/test.csv'
    result_path = './data/gender_submission.csv'

    # data preprocessing
    data = Dealer(train_path, test_path)
    data = data.load_clean()

    # available prediction models: mlp, bayes, ranforest, voting
    print('\n==================== Predicting... ====================\n')
    predictor = Predictor('voting')
    pred_result = predictor.predict(data)
    pred_result.to_csv(result_path, index=False)
    print('================= Prediction Generated. =================\n')
Example #25
def main(train_method):

    clf_model = Model()
    clf_model.data_preprocessing()

    predictor = Predictor()

    # clf_model.data_analysis(clf_model.eval_label[1])
    # clf_model.data_analysis(clf_model.train_label[1])

    if train_method == 'nb': clf_model.naive_bayes()
    elif train_method == 'svm': clf_model.svm()
    elif train_method == 'dt': clf_model.decision_tree()
    elif train_method == 'rf': clf_model.random_forest()
    elif train_method == 'knn': clf_model.knn()
    else: print('No methods found')
Example #26
def calc_relative_error(
        topology_data: TopologyData, training_dataset: TestResults,
        validation_datasets: Iterable[TestResults]) -> pd.DataFrame:
    """Returns DataFrame with columns: training_dataset, msg_len, ping, predicted_ping, rel_error.
    """
    validation_data = join_ping_data(dataset
                                     for dataset in validation_datasets)
    predictor = Predictor(topology_data, training_dataset)
    # build df with cols: msg_len, ping, predicted_ping
    predictions = predictor.predict_many(validation_data).drop(
        ["node1", "node2"], axis=1)
    abs_error = (predictions["value"] - predictions["predicted_ping"]).abs()
    predictions["rel_error"] = (abs_error / predictions["value"]) \
        .fillna(0)  # 0/0 returns NaN. I replace it with 0.
    predictions["training_dataset"] = training_dataset.name
    return predictions
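
A self-contained illustration of the rel_error column, with the 0/0 case mapped to 0 exactly as above:

import pandas as pd

predictions = pd.DataFrame({"value": [10.0, 0.0], "predicted_ping": [12.0, 0.0]})
abs_error = (predictions["value"] - predictions["predicted_ping"]).abs()
predictions["rel_error"] = (abs_error / predictions["value"]).fillna(0)
print(predictions["rel_error"].tolist())  # [0.2, 0.0]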
Example #27
def main():
    req = request.get_json()
    imageBase64 = req["imageBase64"]

    predictorInstance = Predictor(model, imageBase64)
    probabilities, predictedLabel, predictedClass = predictorInstance.predict()

    prediction = {}
    prediction["probabilities"] = {
        "NoDR": probabilities[0],
        "DR": probabilities[1]
    }
    prediction["label"] = predictedLabel
    prediction["class"] = predictedClass

    print()
    print(prediction)
    return json.dumps(prediction)
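
Despite its name, main() reads request.get_json(), so it is evidently a web request handler. A hedged sketch of the surrounding setup, assuming Flask and a module-level model (the route path is an assumption):

import json

from flask import Flask, request

app = Flask(__name__)

@app.route('/predict', methods=['POST'])  # hypothetical route
def main():
    ...  # body as in Example #27 above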
Example #28

 def __init__(self):
     tk.Tk.__init__(self)
     self.predictor = Predictor()
     self.x = self.y = 0
     self.canvas = tk.Canvas(self,
                             width=width,
                             height=height,
                             cursor="cross")
     self.canvas.pack(side="top", fill="both", expand=True)
     self.canvas.bind("<ButtonPress-1>", self.on_button_press)
     self.previous_x = None
     self.previous_y = None
     self.canvas.bind("<B1-Motion>", self.on_button_move)
     self.canvas.bind("<ButtonRelease-1>", self.on_button_release)
     self.canvas.bind("<Button-3>", self.reset)
     # create draw stuff
     self.memImage = Image.new("L", (width, height),
                               "white")  #fill with white
     self.draw = ImageDraw.Draw(self.memImage)
Example #29
 def __init__(self):
     tk.Tk.__init__(self)
     self.predictor = Predictor()
     self.x = self.y = 0
     self.canvas = tk.Canvas(self, width=256, height=256, cursor="cross")
     self.canvas.pack(side="top", fill="both", expand=True)
     self.var = tk.StringVar()
     self.l = tk.Label(textvar=self.var, bg='white', width=50,
                       height=3)  # textvar (unlike text) updates with the StringVar; bg is the background color
     self.l.pack()
     self.canvas.bind("<ButtonPress-1>", self.on_button_press)
     self.previous_x = None
     self.previous_y = None
     self.canvas.bind("<B1-Motion>", self.on_button_move)
     self.canvas.bind("<ButtonRelease-1>", self.on_button_release)
     self.canvas.bind("<Button-3>", self.reset)
     # create draw stuff
     self.memImage = Image.new("L", (256, 256), "white")  #fill with white
     self.draw = ImageDraw.Draw(self.memImage)
Example #30
    def __init__(self):
        super(Window, self).__init__()
        self.site = "xvideos"
        self.xpaths = get_niche_xpaths(self.site)
        self.start_pg = 0
        self.max_pgs = 0
        self.cur_vid = None
        self.cur_img = None

        self.winlock = RLock()
        self.thr = None

        self.db = Database()
        self.q = PriorityQueue()

        self.default_img_flag = True

        self.init_ui()
        self.predict = Predictor()