def __init__(self):
    """Constructor: wire up the pipeline stages (preprocess, train, predict)."""
    # All three collaborators are project-defined classes; presumably their
    # constructors are cheap/stateless — TODO confirm.
    self.__pre_processor = PreProcessor()
    self.__trainer = Trainer()
    self.__predictor = Predictor()
def get_validation_metrics(model: NeuralMachineTranslator,
                           iterations: int,
                           training_evaluator: Evaluator,
                           validation_evaluator: Evaluator,
                           training_iterator: BucketIterator,
                           validation_iterator: BucketIterator,
                           training_batches: int = 50) -> Tuple[Metrics, Metrics]:
    """Compute BLEU/TER metrics over validation batches and a sample of training batches.

    :param model: trained NMT model (supplies the EOS token id)
    :param iterations: number of validation batches to evaluate
    :param training_evaluator: evaluator accumulating training-sample sentences
    :param validation_evaluator: evaluator accumulating validation sentences
    :param training_iterator: batch iterator over the training set
    :param validation_iterator: batch iterator over the validation set
    :param training_batches: how many training batches to sample (default 50,
        the previously hard-coded value — kept for backward compatibility)
    :return: (validation_metrics, training_metrics)
    """
    predictor = Predictor(model)

    # --- validation metrics ---
    validation_evaluator.clear_sentences()
    # BUG FIX: the original called next(iter(validation_iterator)) inside the
    # loop; iter(...) restarts the iterator every pass, so the SAME first
    # batch was evaluated `iterations` times. Create the iterator once.
    validation_batch_iter = iter(validation_iterator)
    for _ in range(iterations):
        validation_batch = next(validation_batch_iter)
        predicted_sentence, _ = predictor.predict(validation_batch)
        validation_evaluator.add_sentences(validation_batch.trg[0], predicted_sentence, model.EOS)
    validation_metrics = Metrics(validation_evaluator.bleu(), validation_evaluator.ter(), 0)

    # --- training metrics (sampled subset) ---
    training_evaluator.clear_sentences()
    training_batch_iter = iter(training_iterator)  # same fix as above
    for _ in range(training_batches):
        batch = next(training_batch_iter)
        predicted_sentence, _ = predictor.predict(batch)
        training_evaluator.add_sentences(batch.trg[0], predicted_sentence, model.EOS)
    training_metrics = Metrics(training_evaluator.bleu(), training_evaluator.ter(), 0)

    return validation_metrics, training_metrics
def run():
    """Capture webcam frames, run face prediction, and show the annotated video.

    The loop exits on ESC or 'q', on a failed frame read, or immediately if
    the capture device cannot be opened; the window and device are released
    on the way out.
    """
    predictor = Predictor()
    predictor.load_model()
    cv2.namedWindow("Webcam")
    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        return
    while True:
        rval, frame = capture.read()
        if not rval:
            break
        image = frame
        # BUG FIX: cv2.VideoCapture yields frames in BGR channel order, so the
        # correct flag is COLOR_BGR2GRAY. The original used COLOR_RGB2GRAY,
        # which applies the R/B luma weights to the wrong channels.
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        for face_info in predictor.predict(gray_image):
            draw_face_info(image, face_info)
        # Landmarks are drawn for the whole frame, not per detected face.
        draw_landmarks(image, get_all_landmarks(gray_image), draw_dots=True, visualize=False)
        cv2.imshow("Webcam", image)
        key = cv2.waitKey(1)
        if key == 27 or key == ord('q'):  # exit on ESC or Q
            break
    cv2.destroyWindow("Webcam")
    capture.release()
def __init__(self):
    """Build the main window: scraping state, worker plumbing, and three GUI panes."""
    super(Window, self).__init__()
    # Scrape-target configuration.
    self.site = "xvideos"
    self.xpaths = get_niche_xpaths(self.site)
    self.start_pg = 0
    self.max_pgs = 0
    # Currently displayed video / preview state.
    self.cur_vid = None
    self.cur_img = None
    self.last_pred = None
    self.default_img_flag = True
    self.preview_size = 2**9  # arbitrary number
    # Worker-thread / queue / persistence plumbing.
    self.winlock = RLock()
    self.thr = None
    self.q = PriorityQueue()
    self.db = Database()
    self.scraper = site_selector(self.site)
    self.set_keybindings()
    # Create the entirety of the GUI and
    # link to appropriate functions.
    self.setWindowTitle('P**n!')
    self.layout = QtGui.QHBoxLayout()
    self.init_left_pane()
    self.init_middle_pane()
    self.init_right_pane()
    self.setLayout(self.layout)
    self.show()
    # NOTE(review): constructed after the panes are built; assumes no pane
    # initializer touches self.predict — confirm.
    self.predict = Predictor()
def status(self):
    """Preprocess self.DATA (a dataframe) and return the model's predicted status."""
    # preprocess() also returns a dataframe.
    cleaned = preprocess(self.DATA)
    predictor = Predictor()
    return predictor.predict(cleaned)
def __init__(self, bert_config_file, is_training, num_labels, train_file,
             dev_file, vocab_file, output_dir, max_seq_length, learning_rate,
             batch_size, epochs, warmup_proportion, virtual_batch_size_ratio,
             evaluate_every, init_ckpt):
    """Set up training: datasets, model, optimizer, and a predictor for eval.

    output_dir is created if missing; all other arguments are forwarded to the
    project's Dataset / Model / optimizer / Predictor constructors unchanged.
    """
    # FIX: replaced `os.system(f"mkdir {output_dir}")` — os.makedirs is
    # portable, immune to shell injection via output_dir, and does not
    # print an error when the directory already exists.
    os.makedirs(output_dir, exist_ok=True)
    # Train and dev datasets share vocab and sequence-length settings; the
    # boolean flags mirror the original call sites — TODO confirm their meaning.
    self._data_train = Dataset(train_file, num_labels, vocab_file, True,
                               output_dir, True, max_seq_length)
    self._dev_data = Dataset(dev_file, num_labels, vocab_file, True,
                             output_dir, False, max_seq_length)
    # Total optimizer steps and linear-warmup steps.
    num_train_step = int(self._data_train.size / batch_size * epochs)
    num_warmup_step = int(num_train_step * warmup_proportion)
    self._model = Model(bert_config_file, max_seq_length, init_ckpt,
                        is_training, num_labels)
    self._train_op, self._global_step = optimization.create_optimizer(
        self._model.loss, learning_rate, num_train_step, num_warmup_step,
        False, virtual_batch_size_ratio)
    self.batch_size = batch_size
    self.epochs = epochs
    self.evaluate_every = evaluate_every
    self.output_dir = output_dir
    self._predictor = Predictor(bert_config_file, max_seq_length, num_labels)
class Runner(object):
    """Drives the whole pipeline: preprocessing, training, and prediction."""

    # NOTE(review): sys.modules[__name__].__name__ is simply __name__;
    # kept unchanged because this is a documentation-only pass.
    __this_file_name = sys.modules[__name__].__name__

    def __init__(self):
        """Constructor: instantiate the pipeline stages."""
        self.__pre_processor = PreProcessor()
        self.__trainer = Trainer()
        self.__predictor = Predictor()

    def run(self):
        """Run the pipeline: train a model, predict, print, and write the submission file.

        :return: None
        """
        # create model
        # model = self.__trainer.fit_model()
        model = self.__trainer.adaboost()

        # predict
        predict_data = self.__predictor.predict(model)
        print(predict_data)
        self.__predictor.write_file_submit(predict_data)
def __init__(self, w, h, og_w, custom, obj_name, is_ofcalc=False):
    """Configure the detector wrapper.

    :param w, h: resized frame width/height fed to the model
    :param og_w: original frame width, used only to derive the scale factor
    :param custom: selects the CSV column layout (see start_idx/label_idx)
    :param obj_name: name of the object class being detected
    :param is_ofcalc: enable optical-flow calculation mode
    """
    self.cheat_data = self.read_csv('../detector/data/data.csv')
    self.obj_name = obj_name
    self.c_start = 0
    self.prev_imgs = []
    self.resize_w = w
    self.resize_h = h
    # Scale factor between resized and original widths.
    self.multi = w / og_w
    # Column offsets differ between the custom and default CSV formats —
    # presumably start_idx is the first coordinate column and label_idx the
    # label column; TODO confirm against read_csv's schema.
    if custom:
        self.start_idx = 1
        self.label_idx = 0
    else:
        self.start_idx = 8
        self.label_idx = 1
    self.is_ofcalc = is_ofcalc
    self.p = Predictor(self.resize_w, self.resize_h, 'models/model_detectorv2.h5')
    # Optical-flow frame size and frame-sampling stride.
    self.of_w = 376
    self.of_h = 240
    self.skipped_frames = 3
    # Drawing defaults (BGR color, font size/thickness).
    self.rect_color = (10, 125, 10)
    self.font_size = 1
    self.font_thickness = 1
def main(arguments):
    """Build the RDN super-resolution model from CLI arguments, then optionally
    print its summary, train it, and/or run predictions, per the flags in
    `arguments`."""
    model_parameters = {
        'scale': arguments['scale'],
        'learning_rate': 1e-5,
        'D': arguments['D'],
        'C': arguments['C'],
        'G': arguments['G'],
        'kernel_size': 3,
        'c_dim': 3,
        'G0': arguments['G0'],
    }
    model = load_model(model_parameters, arguments['vgg'],
                       verbose=arguments['verbose'])
    # IDIOM FIX: `x is True` matches only the exact True singleton; plain
    # truthiness is the conventional test and equivalent for boolean flags.
    if arguments['summary']:
        model.rdn.summary()
    if arguments['train']:
        # Imported lazily so non-training runs skip the trainer dependency.
        from trainer.train import Trainer
        trainer = Trainer(train_arguments=arguments)
        trainer.train_model(model)
    if arguments['test']:
        from predict import Predictor
        predictor = Predictor(test_arguments=arguments)
        predictor.get_predictions(model)
def autonomous_control(model):
    """Run the car autonomously"""
    predictor = Predictor(model)
    with picamera.PiCamera() as camera:
        camera.resolution = configuration.PICAMERA_RESOLUTION
        camera.framerate = configuration.PICAMERA_FRAMERATE
        time.sleep(configuration.PICAMERA_WARM_UP_TIME)
        # Camera is mounted upside down on this build.
        camera.rotation = 180
        pwm = motor_driver_helper.get_pwm_imstance()
        motor_driver_helper.start_pwm(pwm)
        # Per-direction duty-cycle counters; reset whenever direction changes.
        forward_cycle_count = left_cycle_count = right_cycle_count = 0
        should_brake = False
        while True:
            # Grab one JPEG frame into memory, classify it into a steering
            # direction, and archive the frame with its label.
            stream = io.BytesIO()
            camera.capture(stream, format='jpeg', use_video_port=True)
            direction = predictor.predict(stream)
            image_helper.save_image_with_direction(stream, direction)
            stream.flush()
            if direction == 'forward':
                should_brake = True
                left_cycle_count = right_cycle_count = 0
                forward_cycle_count = reduce_speed(pwm, forward_cycle_count)
                motor_driver_helper.set_front_motor_to_idle()
                motor_driver_helper.set_forward_mode()
            elif direction == 'left':
                should_brake = True
                forward_cycle_count = right_cycle_count = 0
                left_cycle_count = increase_speed_on_turn(
                    pwm, left_cycle_count)
                motor_driver_helper.set_left_mode()
                motor_driver_helper.set_forward_mode()
            elif direction == 'right':
                should_brake = True
                forward_cycle_count = left_cycle_count = 0
                right_cycle_count = increase_speed_on_turn(
                    pwm, right_cycle_count)
                motor_driver_helper.set_right_mode()
                motor_driver_helper.set_forward_mode()
            elif direction == 'reverse':
                should_brake = True
                #motor_driver_helper.set_front_motor_to_idle()
                motor_driver_helper.set_right_mode()
                motor_driver_helper.set_reverse_mode()
            else:
                # No recognized direction: brake briefly if we were moving,
                # then go idle at full duty cycle.
                if should_brake:
                    print("braking...")
                    #motor_driver_helper.set_reverse_mode()
                    time.sleep(0.2)
                    should_brake = False
                motor_driver_helper.set_idle_mode()
                forward_cycle_count = left_cycle_count = right_cycle_count = 0
                motor_driver_helper.change_pwm_duty_cycle(pwm, 100)
            print(direction)
def test_factor_in_enrollment(status, semester, score):
    """Enrollment status should contribute the expected amount to the score."""
    courses = {
        "1234": Course(title="title",
                       semester=semester,
                       professor="proferssor",
                       crn="123456",
                       status=status),
    }
    predictor = Predictor(courses["1234"],
                          [201903, 201902, 201901, 201803],
                          "fall")
    predictor.factor_in_enrollment()
    assert predictor.score == score
def filter(f1_recoder: dict, cur_model: str, args):
    """Score `cur_model` on the dev set and keep only the 10 best checkpoints.

    f1_recoder maps dev F1 score -> checkpoint directory; when it grows past
    ten entries, the lowest-scoring checkpoint is deleted from disk.
    """
    predictor = Predictor(os.path.join(cur_model, 'model.pt'),
                          args.word_vocab,
                          args.label_vocab,
                          'data/dev/word.txt',
                          'data/dev/label.txt',
                          args.cuda)
    f1_recoder[predictor.predict()] = cur_model
    if len(f1_recoder) > 10:
        # remove the worst model: min(keys) is the same key the original
        # found via sorted(keys, reverse=True)[-1].
        worst_score = min(f1_recoder.keys())
        worst_path = f1_recoder.pop(worst_score)
        shutil.rmtree(worst_path)
def test_within_last_year(semester, result):
    """within_last_year should return the expected verdict for the semester."""
    course = Course(title="title",
                    semester=semester,
                    professor="professor",
                    crn="123456",
                    status="OPEN")
    course_dict = {"1234": course}
    predictor = Predictor(course_dict["1234"],
                          [201903, 201902, 201901, 201803],
                          "fall")
    instance = course_dict["1234"].instances["123456"]
    assert predictor.within_last_year(instance) == result
def test_likely_to_be_offered():
    """A score of 3 should not be enough to count as 'likely to be offered'."""
    course = Course(title="title",
                    semester="spring 2019",
                    professor="professor",
                    crn="123456",
                    status="OPEN")
    predictor = Predictor(course, [201903, 201902, 201901, 201803], "summer")
    predictor.score = 3
    assert not predictor.likely_to_be_offered()
def __init__(self):
    """Initialize the image-enhancement RL environment state."""
    self.t_step = 0      # current step within the episode
    self.max_step = 10   # episode length cap
    self.done = False
    self.goal = 0.6      # presumably the reward threshold for success — TODO confirm
    # Action space:
    # 0: do nothing, 1: contrast(1.1), 2: contrast(0.9), 3: saturation(1.1),
    # 4: saturation(0.9), 5: exposure(1.1), 6: exposure(0.9)
    self.t_action = ["0", "1", "2", "3", "4", "5", "6"]
    self.actions = {}
    # Reward signal comes from a learned predictor.
    self.get_reward = Predictor()
def post(self): sentence = self.get_argument('sentence') #lan=0 English lan=1 Chinese lan = self.get_argument('language') print('sentence passed is %s' % sentence) #sentence = '{"doc_label": ["Computer--MachineLearning--DeepLearning", "Neuro--ComputationalNeuro"],"doc_token": ["I", "love", "deep", "learning"],"doc_keyword": ["deep learning"],"doc_topic": ["AI", "Machine learning"]}' config = Config(config_file='conf/train.json') if lan == '0': config = Config(config_file='conf/train.json') if lan == '1': print('trains.json used') config = Config(config_file='conf/train2.json') predictor = Predictor(config) batch_size = config.eval.batch_size input_texts = [] predict_probs = [] is_multi = config.task_info.label_type == ClassificationType.MULTI_LABEL #TODO pass sentence as input_texts #for line in codecs.open(sys.argv[2], "r", predictor.dataset.CHARSET): # input_texts.append(line.strip("\n")) # epoches = math.ceil(len(input_texts)/batch_size) # for line in iter(sentence, "\n"): # print('current line is %s' %line) # input_texts.append(line.strip("\n")) # epoches = math.ceil(len(input_texts)/batch_size) input_texts.append(sentence.strip("\n")) epoches = math.ceil(len(input_texts) / batch_size) print('input_texts needed to be predicted is %s' % input_texts) for i in range(epoches): batch_texts = input_texts[i * batch_size:(i + 1) * batch_size] predict_prob = predictor.predict(batch_texts) for j in predict_prob: predict_probs.append(j) for predict_prob in predict_probs: if not is_multi: predict_label_ids = [predict_prob.argmax()] else: predict_label_ids = [] predict_label_idx = np.argsort(-predict_prob) for j in range(0, config.eval.top_k): if predict_prob[ predict_label_idx[j]] > config.eval.threshold: predict_label_ids.append(predict_label_idx[j]) predict_label_name = [predictor.dataset.id_to_label_map[predict_label_id] \ for predict_label_id in predict_label_ids] self.write(";".join(predict_label_name) + "\n")
def main():
    """Load an optional model path from argv, then print one prediction per
    file in ./test_dataset. (Python 2 syntax, kept as-is.)"""
    model = None
    if len(sys.argv) > 1:
        model = sys.argv[1]
        print 'model load success'
    # NOTE(review): Predictor(None) is passed when no argv path was given —
    # presumably it falls back to a default model; confirm.
    predictor = Predictor(model)
    for filename in glob.glob('./test_dataset/*'):
        direction = predictor.predict(filename)  # (was: stream)
        print direction
def crossValidate(train_ratio, test_ratio):
    """Train on a split of the training data and return accuracy on the held-out part."""
    data_train, target_train, data_test, target_test = Dataset.getTrainTestData(
        EnvConfig.TRAIN_DATA_PATH,
        train_ratio=train_ratio,
        test_ratio=test_ratio,
    )
    Trainer.train(data_train, target_train)
    clf = Predictor()
    clf.loadModel()
    predictions = clf.predict(data_test)
    # Accuracy = mean of the element-wise equality array.
    return numpy.mean(predictions == target_test)
def test_factor_in_semester_offered_two_semesters():
    """A course offered in spring and fall (target: summer) should score -4."""
    course = Course(title="title",
                    semester="spring 2019",
                    professor="professor",
                    crn="123456",
                    status="OPEN")
    course.add_instance_of_course("fall 2019", "professor", "56789", "OPEN")
    predictor = Predictor(course, [201903, 201902, 201901, 201803], "summer")
    predictor.factor_in_semester_offered()
    assert predictor.score == -4
def test_factor_in_multiple_professors():
    """Two different professors teaching the course should add 2 to the score."""
    course = Course(title="title",
                    semester="spring 2019",
                    professor="professor",
                    crn="123456",
                    status="OPEN")
    course.add_instance_of_course("spring 2019", "professor2", "56789", "OPEN")
    predictor = Predictor(course, [201903, 201902, 201901, 201803], "fall")
    predictor.factor_in_multiple_professors()
    assert predictor.score == 2
def main(_):
    """Batch-evaluate the classifier over a text file, write per-line
    predictions, and log micro-averaged precision/recall/F1."""
    config = Config(config_file='conf/fasttext_token_char.config')
    predictor = Predictor(config)
    predict_probs = []
    standard_labels = []
    logger = util.Logger(config)
    if not os.path.exists(config.eval.eval_dir):
        os.makedirs(config.eval.eval_dir)
    with codecs.open(config.eval.eval_dir + "/predict.txt", "w",
                     encoding=util.CHARSET) as predict_file:
        texts = []
        # Input is gb18030-encoded; each line is presumably "label\ttext"
        # (the label is recovered via split("\t")[0] below) — TODO confirm.
        for line in codecs.open(config.eval.text_file, "r",
                                encoding='gb18030'):
            line = line.strip("\n")
            texts.append(line)
        batch_size = config.eval.batch_size
        epochs = math.ceil(len(texts) / batch_size)
        for i in range(epochs):
            predicts = predictor.predict(texts[i * batch_size:(i + 1) * batch_size])
            for k in range(len(predicts)):
                # Default written when prediction failed for this line.
                predict_result = "Nil\t0"
                predict = predicts[k]
                line = texts[i * batch_size + k]
                if predict is not None:
                    predict_np = np.array(predict[0], dtype=np.float32)
                    predict_label = predictor.data_processor.id_to_label_map[
                        np.argmax(predict_np)]
                    predict_result = "%s\t%f" % (predict_label,
                                                 np.max(predict_np))
                    # Only successful predictions feed the metric computation,
                    # keeping predict_probs and standard_labels aligned.
                    predict_probs.append(predict[0])
                    standard_labels.append(line.split("\t")[0])
                predict_file.write(predict_result + "\t" + line + "\n")
    evaluator = Evaluator(config.eval.eval_dir)
    multi_label = config.eval.multi_label
    (precision_list, recall_list, fscore_list, standard_list) = evaluator.evaluate(
        predict_probs, standard_labels, predictor.data_processor.label_map,
        config.eval.threshold, multi_label)
    logger.info(
        "Test performance, precision: %f, recall: %f, f1: %f, standard: %d" % (
            precision_list[0][evaluator.MICRO_AVERAGE],
            recall_list[0][evaluator.MICRO_AVERAGE],
            fscore_list[0][evaluator.MICRO_AVERAGE],
            standard_list[0][evaluator.MICRO_AVERAGE],
        ))
    evaluator.save()
def autonomous_control(model):
    """Run the car autonomously"""
    predictor = Predictor(model)
    with picamera.PiCamera() as camera:
        camera.resolution = configuration.PICAMERA_RESOLUTION
        camera.framerate = configuration.PICAMERA_FRAMERATE
        time.sleep(configuration.PICAMERA_WARM_UP_TIME)
        pwm = motor_driver_helper.get_pwm_imstance()
        motor_driver_helper.start_pwm(pwm)
        # Per-direction duty-cycle counters; reset when direction changes.
        forward_cycle_count = left_cycle_count = right_cycle_count = 0
        should_brake = False
        while True:
            # Capture one JPEG frame into memory, classify it into a steering
            # direction, and archive the labeled frame.
            stream = io.BytesIO()
            camera.capture(stream, format='jpeg', use_video_port=True)
            direction = predictor.predict(stream)
            image_helper.save_image_with_direction(stream, direction)
            stream.flush()
            if direction == 'forward':
                should_brake = True
                left_cycle_count = right_cycle_count = 0
                forward_cycle_count = reduce_speed(pwm, forward_cycle_count)
                motor_driver_helper.set_front_motor_to_idle()
                motor_driver_helper.set_forward_mode()
            elif direction == 'left':
                should_brake = True
                forward_cycle_count = right_cycle_count = 0
                left_cycle_count = increase_speed_on_turn(pwm, left_cycle_count)
                motor_driver_helper.set_left_mode()
                motor_driver_helper.set_forward_mode()
            elif direction == 'right':
                should_brake = True
                forward_cycle_count = left_cycle_count = 0
                right_cycle_count = increase_speed_on_turn(pwm, right_cycle_count)
                motor_driver_helper.set_right_mode()
                motor_driver_helper.set_forward_mode()
            elif direction == 'reverse':
                should_brake = True
                motor_driver_helper.set_front_motor_to_idle()
                motor_driver_helper.set_reverse_mode()
            else:
                # No recognized direction: brake briefly if we were moving,
                # then idle at full duty cycle.
                if should_brake:
                    print("braking...")
                    motor_driver_helper.set_reverse_mode()
                    time.sleep(0.2)
                    should_brake = False
                motor_driver_helper.set_idle_mode()
                forward_cycle_count = left_cycle_count = right_cycle_count = 0
                motor_driver_helper.change_pwm_duty_cycle(pwm, 100)
            print(direction)
def main():
    """Crop each high-resolution input image into patches, predict per-patch
    segmentation with the pre-trained ESFNet, and re-merge patches into a
    full-size output image."""
    args = config_parser()
    config = MyConfiguration()
    # for duplicating (reproducible runs)
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(config.random_seed)
    random.seed(config.random_seed)
    np.random.seed(config.random_seed)
    # model load the pre-trained weight, load ckpt once out of predictor
    model = ESFNet(config=config).to(
        'cuda:{}'.format(args.gpu) if args.gpu >= 0 else 'cpu')
    ckpt = torch.load(
        args.ckpt_path,
        map_location='cuda:{}'.format(args.gpu) if args.gpu >= 0 else 'cpu')
    model.load_state_dict(ckpt['state_dict'])
    # path for each high-resolution image -> crop -> predict -> merge
    source_image_pathes = glob.glob(os.path.join(args.input, '*.png'))
    for source_image in tqdm(source_image_pathes):
        # PORTABILITY FIX: the original split on '/', which breaks on
        # Windows paths; os.path handles both separators.
        filename = os.path.splitext(os.path.basename(source_image))[0]
        # cropper gets patches and saves them to --input/patches
        c = Cropper(args=args, configs=config, predict=True)
        _, n_w, n_h, image_h, image_w = c.image_processor(
            image_path=source_image)
        my_dataset = dataset_predict(args=args)
        my_dataloader = data.DataLoader(my_dataset,
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        pin_memory=args.pin_memory,
                                        drop_last=False,
                                        num_workers=args.nb_workers)
        # predict using the pre-trained network
        p = Predictor(args=args, model=model, dataloader_predict=my_dataloader)
        p.predict()
        # p.patches is [total_size, C, H, W]; transpose to [total_size, W, H, C]
        # then reshape into the patch grid expected by unpatchify.
        patches_tensor = torch.transpose(p.patches, 1, 3)
        patches_tensor = patches_tensor.view(n_h, n_w, config.cropped_size,
                                             config.cropped_size, 3)
        # merge and save the output image
        patches = patches_tensor.cpu().numpy()
        img = unpatchify(patches, image_h, image_w)
        #img = Image.fromarray(img)
        save_path = os.path.join(args.output, 'remerge', filename + '.png')
        cv2.imwrite(save_path, img)
def calc_relative_error(
        topology_data: TopologyData,
        training_dataset: TestResults,
        validation_datasets: Iterable[TestResults]
) -> pd.DataFrame:
    """Predict pings for the validation data with a predictor trained on
    `training_dataset`, and return a frame carrying the per-row relative
    error plus a `training_dataset` name column.
    """
    validation_data = join_ping_data(ds for ds in validation_datasets)
    predictor = Predictor(topology_data, training_dataset)
    # Frame with measured ("value") and predicted ("predicted_ping") pings;
    # the node id columns are not needed downstream.
    predictions = predictor.predict_many(validation_data)
    predictions = predictions.drop(["node1", "node2"], axis=1)
    absolute_error = (predictions["value"] - predictions["predicted_ping"]).abs()
    relative_error = absolute_error / predictions["value"]
    # 0/0 yields NaN; a perfect prediction of a zero ping counts as zero error.
    predictions["rel_error"] = relative_error.fillna(0)
    predictions["training_dataset"] = training_dataset.name
    return predictions
def main():
    """End-to-end pipeline: clean the Titanic data, predict, and write the
    submission CSV."""
    # Training data, test data, and the submission output file.
    train_path = './data/train.csv'
    test_path = './data/test.csv'
    result_path = './data/gender_submission.csv'

    # Preprocess both CSVs into one cleaned dataset.
    dealer = Dealer(train_path, test_path)
    data = dealer.load_clean()

    # Available predictor models: mlp, bayes, ranforest, voting.
    print('\n==================== Predicting... ====================\n')
    predictor = Predictor('voting')
    predictor.predict(data).to_csv(result_path, index=False)
    print('================= Prediction Generated. =================\n')
def calc_relative_error(
        topology_data: TopologyData,
        training_dataset: TestResults,
        validation_datasets: Iterable[TestResults]) -> pd.DataFrame:
    """Return per-row relative prediction error for the validation data,
    tagged with the training dataset's name.
    """
    predictor = Predictor(topology_data, training_dataset)
    validation_data = join_ping_data(d for d in validation_datasets)
    # Drop the node id columns; keep measured ("value") vs predicted pings.
    frame = predictor.predict_many(validation_data).drop(
        ["node1", "node2"], axis=1)
    relative = (frame["value"] - frame["predicted_ping"]).abs() / frame["value"]
    frame["rel_error"] = relative.fillna(0)  # 0/0 -> NaN -> treat as 0
    frame["training_dataset"] = training_dataset.name
    return frame
class Evaluator():
    "Evaluate a specific model over a validation set, saving metrics with the model."

    def __init__(self):
        # Predictor wraps model loading and inference.
        self.predictor = Predictor()

    def _load_eval_set(self, eval_file):
        # The validation set is a pickled pandas DataFrame.
        self.eval_df = pd.read_pickle(eval_file)

    def evaluate(self, version):
        """- Load validation data frame
        - Maps previously unseen countries to OTHER
        - Preprocesses data frame for additional features
        - Run prediction
        - Calculate confusion matrix and classification report with
          precision/recall/f-score for each country
        - Save metrics to separate file in model directory for future
          reference (could also save as a json for easy automatic loading
          and comparison)
        """
        self.predictor.load_model(version)
        self._load_eval_set(f"{DATA_DIR}/validation.pkl")
        # Deal with issue of countries unseen in training data: anything the
        # label encoder has never seen is collapsed into the OTHER bucket.
        self.eval_df['COUNTRY.OF.ORIGIN'].fillna('OTHER', inplace=True)
        seen_countries = set(self.predictor.label_encoder.classes_)
        self.eval_df['COUNTRY.OF.ORIGIN.MAPPED'] = self.eval_df[
            'COUNTRY.OF.ORIGIN'].apply(lambda x: 'OTHER'
                                       if x not in seen_countries else x)
        gold_labels = self.predictor.label_encoder.transform(
            self.eval_df['COUNTRY.OF.ORIGIN.MAPPED'])
        predictions = self.predictor.predict(self.eval_df)
        conf_mat = confusion_matrix(gold_labels, predictions)
        logger.info("Confusion matrix", confusion_matrix=conf_mat)
        report = metrics.classification_report(
            gold_labels, predictions,
            target_names=self.predictor.label_encoder.classes_)
        logger.info("Classification report", report=report)
        metrics_path = os.path.join(MODEL_DIR, version, METRICS_FILE)
        # Having trouble getting full matrix to print, will do later
        with open(metrics_path, "w") as metrics_fd:
            metrics_fd.write(f"* Confusion matrix:\n{conf_mat}\n")
            metrics_fd.write(f"* Classification report:\n{report}")
def allocate(self, points_per_network, W, num_layers, step, max_iterations):
    """Create a new Predictor under a random unused id in [n1, n2] and return the id.

    :raises RuntimeError: when every id in [n1, n2] is already in use
        (the original implementation spun forever in that case).
    """
    # ROBUSTNESS FIX: randint(n1, n2) has n2 - n1 + 1 possible values, so
    # once the map holds that many entries the rejection loop below can
    # never terminate — fail loudly instead.
    if len(self.predictor_array) > self.n2 - self.n1:
        raise RuntimeError("no free predictor ids left in [n1, n2]")
    n = randint(self.n1, self.n2)
    while n in self.predictor_array:
        n = randint(self.n1, self.n2)
    self.predictor_array[n] = Predictor(points_per_network, W, num_layers,
                                        step, max_iterations)
    return n
def main():
    """Decode the posted base64 image, run the DR classifier, and return the
    prediction as a JSON string."""
    payload = request.get_json()
    predictor_instance = Predictor(model, payload["imageBase64"])
    probabilities, predicted_label, predicted_class = predictor_instance.predict()
    # Same key order as before so the serialized JSON is unchanged.
    prediction = {
        "probabilities": {
            "NoDR": probabilities[0],
            "DR": probabilities[1],
        },
        "label": predicted_label,
        "class": predicted_class,
    }
    print()
    print(prediction)
    return json.dumps(prediction)
def job_main(): beanstalk = create_beanstalk() print "Job queue starts..." try: while True: try: job = beanstalk.reserve() except beanstalkc.DeadlineSoon: continue request = json.loads(job.body) jobId = job.jid print 'Working on job %s...' % jobId try: jobType = request["jobType"] if jobType == TRAIN: category = request["category"] model = request["model"] trainer = Trainer.create(category, model) if trainer: data = {} data["table_name"] = request["inputTableName"] data["feature_names"] = request.get("features", None) data["target_name"] = request.get("target", None) ret = trainer.run(**data) print 'Job %s finished.' % jobId else: ret = [] print 'No trainer for job %s.' % jobId elif jobType == PREDICT: modelId = request["modelId"] predictor = Predictor(modelId) data = {} data["table_name"] = request["inputTableName"] ret = predictor.run(**data) print 'Job %s finished.' % jobId except: ret = [] print 'Error on job %s.' % jobId job.delete() #time.sleep(30) io_loop.add_callback(job_finished, jobId, ret) except (KeyboardInterrupt, SystemExit): beanstalk.close() sys.exit()
def __init__(self):
    """Build the drawing canvas plus an in-memory mirror image for prediction."""
    tk.Tk.__init__(self)
    self.predictor = Predictor()
    self.x = self.y = 0
    # `width`/`height` are names from the enclosing module — TODO confirm.
    self.canvas = tk.Canvas(self, width=width, height=height, cursor="cross")
    self.canvas.pack(side="top", fill="both", expand=True)
    self.canvas.bind("<ButtonPress-1>", self.on_button_press)
    self.previous_x = None
    self.previous_y = None
    self.canvas.bind("<B1-Motion>", self.on_button_move)
    self.canvas.bind("<ButtonRelease-1>", self.on_button_release)
    # Right-click resets the canvas.
    self.canvas.bind("<Button-3>", self.reset)
    # create draw stuff: a PIL mirror of the canvas that the predictor can read.
    self.memImage = Image.new("L", (width, height), "white")  # fill with white
    self.draw = ImageDraw.Draw(self.memImage)
def __init__(self):
    """Build a 256x256 drawing canvas, a result label, and an in-memory
    mirror image for prediction."""
    tk.Tk.__init__(self)
    self.predictor = Predictor()
    self.x = self.y = 0
    self.canvas = tk.Canvas(self, width=256, height=256, cursor="cross")
    self.canvas.pack(side="top", fill="both", expand=True)
    self.var = tk.StringVar()
    # textvar (unlike text) tracks the StringVar; bg sets the background color.
    self.l = tk.Label(textvar=self.var, bg='white', width=50, height=3)
    self.l.pack()
    self.canvas.bind("<ButtonPress-1>", self.on_button_press)
    self.previous_x = None
    self.previous_y = None
    self.canvas.bind("<B1-Motion>", self.on_button_move)
    self.canvas.bind("<ButtonRelease-1>", self.on_button_release)
    # Right-click resets the canvas.
    self.canvas.bind("<Button-3>", self.reset)
    # create draw stuff: a PIL mirror of the canvas that the predictor can read.
    self.memImage = Image.new("L", (256, 256), "white")  # fill with white
    self.draw = ImageDraw.Draw(self.memImage)
def __init__(self):
    """Initialize window state and worker plumbing, then build the UI."""
    super(Window, self).__init__()
    # Scrape-target configuration.
    self.site = "xvideos"
    self.xpaths = get_niche_xpaths(self.site)
    self.start_pg = 0
    self.max_pgs = 0
    # Currently displayed video / preview state.
    self.cur_vid = None
    self.cur_img = None
    # Worker-thread / queue / persistence plumbing.
    self.winlock = RLock()
    self.thr = None
    self.db = Database()
    self.q = PriorityQueue()
    self.default_img_flag = True
    self.init_ui()
    # NOTE(review): created after init_ui(); assumes the UI setup never
    # touches self.predict — confirm.
    self.predict = Predictor()
class Window(QtGui.QWidget):
    """ The graphical interface to the software that the user sees. """

    def __init__(self):
        """Initialize scraping state, worker plumbing, and the grid UI."""
        super(Window, self).__init__()
        self.site = "xvideos"
        self.xpaths = get_niche_xpaths(self.site)
        self.start_pg = 0
        self.max_pgs = 0
        self.cur_vid = None
        self.cur_img = None
        self.winlock = RLock()
        self.thr = None
        self.db = Database()
        self.q = PriorityQueue()
        self.default_img_flag = True
        self.init_ui()
        # NOTE(review): self.last_pred is read in refresh_images()/pop_video()
        # but never initialized here — pop_video() must run first; confirm.
        self.predict = Predictor()

    def init_ui(self):
        """ Create the entirety of the GUI and link to appropriate functions. """
        self.setWindowTitle('P**n!')
        self.layout = QtGui.QGridLayout()
        # NICHE COMBO: make dropdown menu to select a fetish
        self.niche_combo = QtGui.QComboBox(self)
        keys = sorted(self.xpaths.keys())
        for k in keys:
            self.niche_combo.addItem(k)
        self.niche_combo.setCurrentIndex(0)
        self.niche = keys[0]
        self.niche_combo.activated[str].connect(self.set_niche)
        self.layout.addWidget(self.niche_combo, 0, 0, 1, 2)
        # START PG: spinbox to indicate the page to start scraping on
        self.start_lbl = QtGui.QLabel(self)
        self.start_lbl.setText("start page")
        self.layout.addWidget(self.start_lbl, 2, 0, 1, 1)
        self.start_pg_spn = QtGui.QSpinBox(self)
        self.start_pg_spn.valueChanged[int].connect(self.set_start_pg)
        self.layout.addWidget(self.start_pg_spn, 3, 0, 1, 1)
        # NUM PGS: spinbox to indicate the number of pages to scrape
        self.n_pgs_lbl = QtGui.QLabel(self)
        self.n_pgs_lbl.setText("pages to scrape")
        self.layout.addWidget(self.n_pgs_lbl, 2, 1, 1, 1)
        self.n_pgs_spn = QtGui.QSpinBox(self)
        self.n_pgs_spn.setMinimum(1)
        self.n_pgs_spn.valueChanged[int].connect(self.set_max_pgs)
        self.layout.addWidget(self.n_pgs_spn, 3, 1, 1, 1)
        # PROGRESS BAR: tracks the progress of the scraper
        self.prog = QtGui.QProgressBar(self)
        self.layout.addWidget(self.prog, 6, 0, 1, 2)
        # SCRAPE: begin scraping
        self.scrape_btn = QtGui.QPushButton("scrape", self)
        self.scrape_btn.clicked.connect(self.scrape)
        self.layout.addWidget(self.scrape_btn, 7, 0, 1, 2)
        # RETRAIN: manual retraining of prediction algorithm
        self.train_btn = QtGui.QPushButton("recalculate prediction model", self)
        self.train_btn.clicked.connect(self.retrain)
        self.layout.addWidget(self.train_btn, 9, 0, 1, 2)
        # QUIT: make quit button
        self.quit_btn = QtGui.QPushButton("quit", self)
        self.quit_btn.clicked.connect(self.quit)
        self.layout.addWidget(self.quit_btn, 11, 0, 1, 2)
        # IMGS: images which will display the video preview
        img_len = 12
        self.imgs = []
        pic_num = 0
        pixmap = QtGui.QPixmap("0.jpg")
        img_lbl = QtGui.QLabel(self)
        img_lbl.setPixmap(pixmap)
        self.imgs.append(img_lbl)
        self.layout.addWidget(img_lbl, 0, 3, img_len, img_len)
        # SLIDER: slide to rate the quality of the video
        self.slider = QtGui.QSlider(self, QtCore.Qt.Vertical)
        self.slider.setTickPosition(QtGui.QSlider.TicksBothSides)
        self.slider.setTickInterval(20)
        self.layout.addWidget(self.slider, 0, img_len + 3, img_len, 1)
        # RATE button
        self.rate_btn = QtGui.QPushButton("rate", self)
        self.rate_btn.clicked.connect(self.rate)
        self.layout.addWidget(self.rate_btn, 0, img_len + 4, 1, 2)
        # OPEN button
        self.open_btn = QtGui.QPushButton("open", self)
        self.open_btn.clicked.connect(lambda: webbrowser.open(self.cur_vid))
        self.layout.addWidget(self.open_btn, 1, img_len + 4, 1, 2)
        # INFO box
        self.info_box = QtGui.QLabel(self)
        self.info_box.setText("")
        self.layout.addWidget(self.info_box, 3, img_len + 4, 1, 1)
        # SKIP button
        self.skip_btn = QtGui.QPushButton("skip", self)
        self.skip_btn.clicked.connect(self.skip)
        self.layout.addWidget(self.skip_btn, img_len - 1, img_len + 4, 1, 2)
        self.setLayout(self.layout)
        self.show()

    def retrain(self):
        """Refit the prediction model from scratch and re-rank the queue."""
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        self.predict.refit_from_scratch()
        redo_predictions(self.predict, self.q.qsize(), self.q)
        QtGui.QApplication.restoreOverrideCursor()

    def refresh_images(self):
        """Download the current preview image and refresh the info panel."""
        r = requests.get(self.cur_img)
        if r.status_code == 200:
            pixmap = QtGui.QPixmap()
            pixmap.loadFromData(r.content)
            pixmap.scaledToWidth(255)  # 255 is just an arbitrary size
            pixmap.scaledToHeight(255)
            self.imgs[0].setPixmap(pixmap)
            self.imgs[0].update()
            self.repaint()
        r.close()
        data = self.db.get(self.cur_vid)
        self.setWindowTitle(data["name"])
        info_str = "dur: {}\n\nviews: {}\n\nprediction: {}\n\ntags: {}"
        n_tags = 15
        tag_str = ""
        if data["tags"]:
            # Keep at most n_tags tags longer than two characters.
            tags = [tag for tag in data["tags"] if len(tag) > 2]
            tags = tags[:min(n_tags, len(data["tags"]))]
            for tag in tags:
                tag_str += "\n" + tag
        info_str = info_str.format(data["dur"], data["views"],
                                   # old design had an out of 6 scale
                                   round(self.last_pred*100/6, 2), tag_str)
        self.info_box.setText(info_str)

    def pop_video(self):
        """Take the next (best-predicted) video off the queue; returns
        (prediction, url)."""
        if self.q.empty():
            self.info_box.setText("Video queue empty.")
            self.repaint()
            return self.last_pred, self.cur_vid
        self.last_pred, self.cur_vid = self.q.get()
        # Queue stores negated predictions so that the priority queue pops
        # the highest prediction first — presumably; confirm in PopulateQ.
        self.last_pred *= -1
        self.cur_img = self.db.get_img(self.cur_vid)
        return self.last_pred, self.cur_vid

    def set_start_pg(self, num):
        self.start_pg = num

    def set_max_pgs(self, num):
        self.max_pgs = num

    def scrape(self):
        """Stop any running scraper thread, then start a new one."""
        if self.thr:
            self.thr.exit_flag = True
            while not self.thr.exit_ready:
                time.sleep(0.5)
        self.update_prog(0)
        self.thr = PopulateQ(
            self.site,
            self.niche,
            self.q,
            self.start_pg,
            self.max_pgs,
            self.winlock,
            self.predict
        )
        self.thr.updateProgress.connect(self.update_prog)
        self.thr.start()

    def rate(self):
        """Persist the slider rating for the current video, feed it to the
        model, and advance to the next video."""
        self.db.give_feedback(self.cur_vid, self.slider.value()/100*6)
        data = self.db.get(self.cur_vid)
        data["feedback"] = self.slider.value() + 0.0001  # db doesn't like 0s
        with self.winlock:
            self.predict.fit(data)
        if self.q.empty():
            # NOTE(review): self.infoBox is not defined elsewhere in this
            # class (the attribute is info_box) — likely a latent AttributeError.
            self.infoBox.setText("Queue Empty")
            self.repaint()
        else:
            self.pop_video()
            self.refresh_images()

    def update_prog(self, progress):
        """Progress callback from the scraper thread; also shows the first
        preview once something is queued."""
        self.prog.setValue(progress)
        if self.default_img_flag:
            if not self.q.empty():
                _, vid_url = self.pop_video()
                self.refresh_images()
                self.default_img_flag = False

    def update_prog_init(self, progress):
        self.prog.setValue(progress)
        self.repaint()

    def skip(self):
        """Advance to the next queued video without rating the current one."""
        if self.q.empty():
            self.info_box.setText("Queue Empty")
            self.repaint()
        else:
            self.pop_video()
            self.refresh_images()

    def set_niche(self, text):
        # NOTE(review): the placeholder branch is ineffective — self.niche is
        # unconditionally overwritten on the next line.
        if text == "select niche":
            self.niche = None
        self.niche = text

    def quit(self):
        """Close the DB connection, stop the predictor and scraper thread
        (waiting up to ~1s), then quit the Qt event loop."""
        self.db.cnx.close()
        self.predict.quit()
        if self.thr:
            self.thr.exit_flag = True
            for _ in range(4):
                if not self.thr.exit_ready:
                    time.sleep(0.25)
        QtCore.QCoreApplication.instance().quit()
class Window(QtGui.QWidget):
    """
    The graphical interface to the software that the user sees.
    """

    def __init__(self):
        """Build all panes, wire up signals, and show the window."""
        super(Window, self).__init__()
        self.site = "xvideos"
        self.xpaths = get_niche_xpaths(self.site)
        self.start_pg = 0
        self.max_pgs = 0
        self.cur_vid = None
        self.cur_img = None
        self.last_pred = None
        # "Previous" slots so unpop_video() has something to restore even
        # before the first pop.
        self.prev_vid = None
        self.prev_img = None
        self.prev_pred = None
        self.default_img_flag = True
        self.preview_size = 2**9  # arbitrary number
        self.winlock = RLock()
        self.thr = None
        self.q = PriorityQueue()
        self.db = Database()
        self.scraper = site_selector(self.site)
        self.set_keybindings()
        # Create the entirety of the GUI and
        # link to appropriate functions.
        self.setWindowTitle('P**n!')
        self.layout = QtGui.QHBoxLayout()
        self.init_left_pane()
        self.init_middle_pane()
        self.init_right_pane()
        self.setLayout(self.layout)
        self.show()
        self.predict = Predictor()

    def set_keybindings(self):
        """Map keyboard shortcuts to their actions."""
        bind = lambda k, f: QtGui.QShortcut(QtGui.QKeySequence(k), self, f)
        bind(QtCore.Qt.Key_Tab, self.skip)
        bind(QtCore.Qt.Key_Right, self.skip)
        bind(QtCore.Qt.Key_Left, self.unpop_video)
        bind(QtCore.Qt.Key_O, lambda: webbrowser.open(self.cur_vid))  # letter, not number
        # Digit keys 0-9 set the slider to 0%, 11%, ..., 99%.  Bind the
        # loop variable as a default argument so each lambda keeps its own
        # value -- Python's late-binding closures are why the earlier
        # loop/eval attempts produced ten identical bindings.
        digit_keys = (QtCore.Qt.Key_0, QtCore.Qt.Key_1, QtCore.Qt.Key_2,
                      QtCore.Qt.Key_3, QtCore.Qt.Key_4, QtCore.Qt.Key_5,
                      QtCore.Qt.Key_6, QtCore.Qt.Key_7, QtCore.Qt.Key_8,
                      QtCore.Qt.Key_9)
        for i, key in enumerate(digit_keys):
            bind(key, lambda i=i: self.slider.setValue(i * 11))
        bind(QtCore.Qt.Key_Enter, self.rate)   # keypad enter
        bind(QtCore.Qt.Key_Return, self.rate)  # regular enter

    def init_left_pane(self):
        """Build the left pane: niche selector, page controls, progress
        bar, scrape button, URL loader, retrain and quit buttons."""
        self.left_pane = QtGui.QVBoxLayout()
        # NICHE COMBO: make dropdown menu to select a fetish
        self.niche_combo = QtGui.QComboBox(self)
        keys = sorted(self.xpaths.keys())
        for k in keys:
            self.niche_combo.addItem(k)
        self.niche_combo.setCurrentIndex(0)
        self.niche = keys[0]
        self.niche_combo.activated[str].connect(self.set_niche)
        self.left_pane.addWidget(self.niche_combo)
        # START PG AND PGS TO SCRAPE
        self.left_pane.addSpacing(50)
        self.init_page_btns()
        self.left_pane.addSpacing(25)
        # PROGRESS BAR: tracks the progress of the scraper
        self.prog = QtGui.QProgressBar(self)
        self.left_pane.addWidget(self.prog)
        # SCRAPE: begin scraping
        self.scrape_btn = QtGui.QPushButton("scrape", self)
        self.scrape_btn.clicked.connect(self.scrape)
        self.left_pane.addWidget(self.scrape_btn)
        self.left_pane.addSpacing(25)
        # LOAD URL: load a specific url, presumably for rating.
        self.load_url_box = QtGui.QLineEdit()
        self.load_url_box.setPlaceholderText("load a specific url")
        self.feedback_spin = QtGui.QSpinBox()
        self.feedback_spin.setMaximum(100)  # (ratings must be between 0 and 100)
        self.enter_btn = QtGui.QPushButton("save")  # also on bottom is the enter/save button
        self.enter_btn.clicked.connect(lambda: self.save_usr_url())
        self.load_url_extra = QtGui.QHBoxLayout()  # put feedback and enter in one row
        self.load_url_extra.addWidget(self.feedback_spin)
        self.load_url_extra.addWidget(self.enter_btn)
        self.load_url_group = QtGui.QVBoxLayout()  # group it all together
        self.load_url_group.addWidget(self.load_url_box)
        self.load_url_group.addLayout(self.load_url_extra)
        self.left_pane.addLayout(self.load_url_group)
        self.left_pane.addSpacing(25)
        # RETRAIN: manual retraining of prediction algorithm
        self.train_btn = QtGui.QPushButton("recalculate prediction model", self)
        self.train_btn.clicked.connect(self.retrain)
        self.left_pane.addWidget(self.train_btn)
        self.left_pane.addSpacing(50)
        # QUIT: make quit button
        self.quit_btn = QtGui.QPushButton("quit", self)
        self.quit_btn.clicked.connect(self.quit)
        self.left_pane.addWidget(self.quit_btn)
        self.layout.addLayout(self.left_pane)

    def init_middle_pane(self):
        """Build the middle pane: preview image and rating slider."""
        self.mid_pane = QtGui.QHBoxLayout()
        # IMGS: image which will display the video preview.
        # QPixmap scaling returns a new pixmap rather than scaling in
        # place, so the result must be reassigned -- the old unassigned
        # scaledToWidth/scaledToHeight calls were silent no-ops.
        pixmap = QtGui.QPixmap("0.jpg")
        pixmap = pixmap.scaled(self.preview_size, self.preview_size,
                               QtCore.Qt.KeepAspectRatio)
        img_lbl = QtGui.QLabel(self)
        img_lbl.setPixmap(pixmap)
        # Make sure the window isn't constantly resizing
        img_lbl.setScaledContents(True)
        img_lbl.setMaximumWidth(self.preview_size)
        img_lbl.setMaximumHeight(self.preview_size)
        self.img = img_lbl
        self.mid_pane.addWidget(self.img)
        # SLIDER: slide to rate the quality of the video.
        # QSlider's constructor is (orientation, parent); the arguments
        # were previously reversed, which raises TypeError in PyQt.
        self.slider = QtGui.QSlider(QtCore.Qt.Vertical, self)
        self.slider.setTickPosition(QtGui.QSlider.TicksBothSides)
        self.slider.setTickInterval(20)
        self.mid_pane.addWidget(self.slider)
        self.layout.addLayout(self.mid_pane)

    def init_right_pane(self):
        """Build the right pane: rate/open buttons, info text, skip."""
        self.right_pane = QtGui.QVBoxLayout()
        # RATE button
        self.rate_btn = QtGui.QPushButton("rate", self)
        self.rate_btn.clicked.connect(self.rate)
        self.right_pane.addWidget(self.rate_btn)
        # OPEN button
        self.open_btn = QtGui.QPushButton("open", self)
        self.open_btn.clicked.connect(lambda: webbrowser.open(self.cur_vid))
        self.right_pane.addWidget(self.open_btn)
        # INFO box
        self.info_box = QtGui.QLabel(self)
        self.info_box.setText("")
        self.right_pane.addWidget(self.info_box)
        # SKIP button
        # by some magic this is aligned correctly
        self.skip_btn = QtGui.QPushButton("skip", self)
        self.skip_btn.clicked.connect(self.skip)
        self.right_pane.addWidget(self.skip_btn)
        self.layout.addLayout(self.right_pane)

    def init_page_btns(self):
        """
        Create the start page and pages to scrape buttons along with their
        corresponding labels. Each label and spinbox is first grouped
        together vertically, then put together with the other spinbox
        horizontally and finally into the main layout of our window.
        """
        self.pg_spinboxes = QtGui.QHBoxLayout()
        # START PG: spinbox to indicate the page to start scraping on
        self.start_pg_group = QtGui.QVBoxLayout()
        self.start_pg_group.setAlignment(QtCore.Qt.AlignTop)
        self.start_lbl = QtGui.QLabel(self)
        self.start_lbl.setText("start page")
        self.start_pg_spn = QtGui.QSpinBox(self)
        self.start_pg_spn.valueChanged[int].connect(self.set_start_pg)
        self.start_pg_group.addWidget(self.start_lbl)     # "start page"
        self.start_pg_group.addWidget(self.start_pg_spn)  # <spinbox>
        self.pg_spinboxes.addLayout(self.start_pg_group)
        self.pg_spinboxes.addSpacing(20)
        # NUM PGS: spinbox to indicate the number of pages to scrape
        self.n_pgs_group = QtGui.QVBoxLayout()
        self.n_pgs_group.setAlignment(QtCore.Qt.AlignTop)
        self.n_pgs_lbl = QtGui.QLabel(self)
        self.n_pgs_lbl.setText("pages to scrape")
        self.n_pgs_spn = QtGui.QSpinBox(self)
        self.n_pgs_spn.valueChanged[int].connect(self.set_max_pgs)
        self.n_pgs_spn.setMinimum(1)
        self.n_pgs_group.addWidget(self.n_pgs_lbl)  # "pages to scrape"
        self.n_pgs_group.addWidget(self.n_pgs_spn)  # <spinbox>
        # (Removed a stray setAlignment(QtCore.Qt.Vertical) call here:
        # Qt.Vertical is an Orientation, not an Alignment flag, and the
        # alignment was already set to AlignTop above.)
        self.pg_spinboxes.addLayout(self.n_pgs_group)
        # Combine both in a box.
        self.pg_spinboxes.setAlignment(QtCore.Qt.AlignTop)
        self.left_pane.addLayout(self.pg_spinboxes)

    def retrain(self):
        """Rebuild the prediction model from scratch and re-rank the
        queued videos, showing a wait cursor meanwhile."""
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        self.predict.refit_from_scratch()
        redo_predictions(self.predict, self.q.qsize(), self.q)
        QtGui.QApplication.restoreOverrideCursor()

    def save_usr_url(self):
        """Scrape the user-supplied URL and store it along with the
        manually entered feedback score."""
        url = self.load_url_box.text()
        data = self.scraper.scrape_video(url)
        self.db.save(data)
        self.db.give_feedback(url, self.feedback_spin.value())
        print("finished")

    def refresh_images(self):
        """Download and display the current video's preview image and
        refresh the window title / info text."""
        try:
            r = requests.get(self.cur_img)
        except requests.RequestException:  # narrowed from a bare except
            return None
        try:
            if r.status_code == 200:
                pixmap = QtGui.QPixmap()
                pixmap.loadFromData(r.content)
                # Scaling returns a new pixmap; reassign (the unassigned
                # scaledToWidth/scaledToHeight calls were no-ops).
                pixmap = pixmap.scaled(self.preview_size, self.preview_size,
                                       QtCore.Qt.KeepAspectRatio)
                self.img.setPixmap(pixmap)
                self.img.update()
                self.repaint()
        finally:
            # Close the response even on non-200 replies (previously only
            # the success path released it).
            r.close()
        data = self.db.get(self.cur_vid)
        self.setWindowTitle(data["name"])
        info_str = "dur: {}\n\nviews: {}\n\nprediction: {}\n\ntags: {}"
        n_tags = 15
        tag_str = ""
        if data["tags"]:
            # Show at most n_tags tags, skipping very short ones.
            tags = [tag for tag in data["tags"] if len(tag) > 2]
            for tag in tags[:n_tags]:
                tag_str += "\n" + tag
        info_str = info_str.format(data["dur"], data["views"],
                                   # old design had an out of 6 scale
                                   round(self.last_pred, 2),
                                   tag_str)
        self.info_box.setText(info_str)

    def pop_video(self):
        """
        Remove a video from the queue and display in the program.
        """
        if self.q.empty():
            self.info_box.setText("Video queue empty.")
            self.repaint()
            return self.last_pred, self.cur_vid
        # Remember the current video so unpop_video() can restore it.
        # TODO last_pred should be called cur_pred
        self.prev_pred = copy.copy(self.last_pred)
        self.prev_vid = copy.copy(self.cur_vid)
        self.prev_img = copy.copy(self.cur_img)
        self.last_pred, self.cur_vid = self.q.get()
        # PriorityQueue pops the smallest items first, so we have to
        # remember to get rid of the negative sign we used to make
        # predictions of high ratings the smallest items.
        self.last_pred *= -1
        self.cur_img = self.db.get_img(self.cur_vid)
        return self.last_pred, self.cur_vid

    def unpop_video(self):
        """ Undo previous pop_video. """
        if self.prev_vid is None:
            # Nothing has been popped yet; avoid displaying a None video.
            return
        # Swap prev with cur -- including the prediction, which was
        # previously left stale so the info box showed the wrong value
        # after an undo.
        self.cur_vid, self.prev_vid = self.prev_vid, self.cur_vid
        self.cur_img, self.prev_img = self.prev_img, self.cur_img
        self.last_pred, self.prev_pred = self.prev_pred, self.last_pred
        self.refresh_images()

    def set_start_pg(self, num):
        """Spinbox slot: remember the page scraping should start on."""
        self.start_pg = num

    def set_max_pgs(self, num):
        """Spinbox slot: remember how many pages to scrape."""
        self.max_pgs = num

    def scrape(self):
        """(Re)start the scraper thread with the current settings."""
        if self.thr:
            # NOTE(review): dropping the reference does not stop a running
            # QThread -- confirm PopulateQ terminates on its own.
            del self.thr
        self.update_prog(0)
        self.thr = PopulateQ(self.site, self.niche, self.q, self.start_pg,
                             self.max_pgs, self.winlock, self.predict)
        self.thr.updateProgress.connect(self.update_prog)
        self.thr.start()

    def rate(self):
        """Persist the slider rating for the current video, fit the
        predictor on it, and advance to the next queued video."""
        self.db.give_feedback(self.cur_vid, self.slider.value())
        data = self.db.get(self.cur_vid)
        data["feedback"] = self.slider.value() + 0.0001  # db doesn't like 0s
        with self.winlock:
            self.predict.fit(data)
        if self.q.empty():
            self.info_box.setText("Queue Empty")
            self.repaint()
            # Show the next scraped video as soon as one arrives.
            self.default_img_flag = True
        else:
            self.pop_video()
            self.refresh_images()

    def update_prog(self, progress):
        """Scraper-thread callback: advance the progress bar and, once the
        first video arrives, replace the default image with it."""
        self.prog.setValue(progress)
        if self.default_img_flag:
            if not self.q.empty():
                _, vid_url = self.pop_video()
                self.refresh_images()
                self.default_img_flag = False

    def update_prog_init(self, progress):
        """Progress callback used during start-up; just repaint."""
        self.prog.setValue(progress)
        self.repaint()

    def skip(self):
        """Discard the current video and show the next queued one."""
        if self.q.empty():
            self.info_box.setText("Queue Empty")
            self.repaint()
            self.default_img_flag = True
        else:
            self.pop_video()
            self.refresh_images()

    def set_niche(self, text):
        """Combo-box slot: store the selected niche.

        The placeholder entry means "no niche selected".  Fixed: the
        placeholder check was previously clobbered by an unconditional
        assignment on the next line, so self.niche was never left None.
        """
        if text == "select niche":
            self.niche = None
        else:
            self.niche = text

    def quit(self):
        """Close database/predictor resources and exit the Qt event loop."""
        self.db.cnx.close()
        self.predict.quit()
        if self.thr:
            del self.thr
        QtCore.QCoreApplication.instance().quit()