class TestFilter(unittest.TestCase):
    def setUp(self):
        self.finder = Filter()

    # Negative Testing 1
    def test_data_empty(self):
        self.finder = Filter()
        data = self.finder.size()
        self.assertEqual(data, 0)

    # Positive Testing 1
    def test_data_exist(self):
        self.finder = Filter(['a', 'b', 'a'])
        data = self.finder.size()
        self.assertNotEqual(data, 0)

    # Negative Testing 2
    def test_process_data_empty(self):
        self.finder = Filter()
        data = self.finder.process()
        self.assertListEqual(data, [])

    # Positive Testing 1
    def test_process_data_singlelist(self):
        self.finder = Filter(['a', 'b', 'a'])
        data = self.finder.process()
        self.assertNotEqual(data, 0)

    # Positive Testing 2
    def test_process_data_multipleList(self):
        self.finder = Filter(['a', 'b', 'a', 'ab', 'cd', 'abc', 'xyz'])
        data = self.finder.process()
        self.assertNotEqual(data, 0)
def _getNewYEstimateAtCurrentResolutionLevel(self, X, radiograph, currentTooth,
                                             currentResolutionLevel, filter_settings):
    # Pre-process image
    img = Filter.process_image(
        deepcopy(radiograph.getImage(deepCopy=True)),
        filter_settings[0], filter_settings[1], filter_settings[2])
    derivate_img = Filter.laplacian(img)
    # derivate_img = Filter.histogramEql(Filter.process_image(deepcopy(img), filter_settings[0], filter_settings[1], filter_settings[2]))
    # derivate_img = Filter.process_image(deepcopy(derivate_img), median_kernel=3, bilateral_kernel=5)
    # cv2.imshow("Test", derivate_img)
    # cv2.waitKey(0)

    # Get the correct model
    multiResModel = self.completeStatisticalModel.getGrayLevelMultiResolutionModel(deepCopy=True)
    singleResModel = multiResModel.getGrayLevelSignleResModelByResolutionLevelIndex(currentResolutionLevel)
    toothModel = singleResModel.getGrayLevelToothModelByToothIndex(currentTooth, currentResolutionLevel)

    # Init X and Y
    Y = np.zeros((40, 2))
    X = X.getLandmarks()
    counter = 0
    for i in range(40):
        Y[i], is_close = self._getYPointEstimateFromGivenModelPoint(
            toothModel, i, X, currentResolutionLevel, currentTooth, img, derivate_img)
        if is_close:
            counter += 1

    return Y, counter / 40
def filterTest(self):
    radiograph = self.dataHandler.getRadiographs(deepCopy=True)[0]

    blurred_img = Filter.process_image(deepcopy(radiograph.getImage()),
                                       median_kernel=3, bilateral_kernel=10)
    cv2.imshow("BlurredImage", blurred_img)

    clahe = cv2.equalizeHist(blurred_img)
    cv2.imshow("Clahe", clahe)

    img_1 = Filter.laplacian(blurred_img)
    cv2.imshow("Img1", img_1)
def _find_poses(self, image):
    '''
    Find initial poses for the image.
    :param image: image instance
    :return: array in format [(np.array(x position, y position), scale, rotation), ...]
    '''
    self.top_jaw_line, self.lower_jaw_line = self._find_jaw_separation_line(self._crop_image_sides(image))
    upper_jaw_image = self.crop_upper_jaw(image, self.top_jaw_line)
    lower_jaw_image = self.crop_lower_jaw(image, self.lower_jaw_line)

    # Filter the image
    upper_jaw_image = Filter.process_image(upper_jaw_image, median_kernel=5, bilateral_kernel=17, bilateral_color=6)
    lower_jaw_image = Filter.process_image(lower_jaw_image, median_kernel=5, bilateral_kernel=17, bilateral_color=6)

    upper_jaw_image = self._convert_to_binary_image(upper_jaw_image)
    lower_jaw_image = self._convert_to_binary_image(lower_jaw_image)

    # Find the lines in the image
    upper_lines = self._find_hough_lines(upper_jaw_image, threshold=15)
    lower_lines = self._find_hough_lines(lower_jaw_image, threshold=15)

    # Filter out lines
    upper_lines = self._filter_lines(upper_lines, upper_jaw_image.shape, line_offset=6, max_line_gap=90)
    lower_lines = self._filter_lines(lower_lines, lower_jaw_image.shape, line_offset=2, max_line_gap=60)

    # Compute starting points
    rho0, theta0 = upper_lines[0]
    rho1, theta1 = upper_lines[1]
    rho2, theta2 = upper_lines[2]
    position0 = np.array((rho0 - 35 + self.crop_sides_size, 50 + (self.top_jaw_line - self.crop_upper_jaw_top_size)))
    position1 = np.array((rho0 + (rho1 - rho0) / 2 + self.crop_sides_size, 80 + (self.top_jaw_line - self.crop_upper_jaw_top_size)))
    position2 = np.array((rho1 + (rho2 - rho1) / 2 + self.crop_sides_size, 80 + (self.top_jaw_line - self.crop_upper_jaw_top_size)))
    position3 = np.array((rho2 + 35 + self.crop_sides_size, 50 + (self.top_jaw_line - self.crop_upper_jaw_top_size)))

    rho0, theta0 = lower_lines[0]
    rho1, theta1 = lower_lines[1]
    rho2, theta2 = lower_lines[2]
    position4 = np.array((rho0 - 40 + self.crop_sides_size, 90 + self.lower_jaw_line))
    position5 = np.array((rho0 + (rho1 - rho0) / 2 + self.crop_sides_size, 90 + self.lower_jaw_line))
    position6 = np.array((rho1 + (rho2 - rho1) / 2 + self.crop_sides_size, 90 + self.lower_jaw_line))
    position7 = np.array((rho2 + 40 + self.crop_sides_size, 90 + self.lower_jaw_line))

    return [(position0, 48, 0.05), (position1, 55, 0.2), (position2, 55, 0.2), (position3, 48, 0.3),
            (position4, 40, 0), (position5, 38, -0.05), (position6, 38, -0.10), (position7, 40, -0.15)]
def set_radiograph_image(self, radiograph_image):
    '''
    Processes the image and saves its subsampled versions into the appropriate resolution levels.
    :param radiograph_image: Image to process. Should be the original radiograph image without processing.
    '''
    self.crop_translation = -Filter.get_cropping_region(radiograph_image).left_top
    image = Filter.crop_image(radiograph_image)

    for i in range(0, self.levels_count):
        if i > 0:
            image = MultiResolutionFramework.downsample_image(image)

        median_kernel, bilateral_kernel, bilateral_color = MultiResolutionFramework.get_filter_presets(i)
        filtered_image = Filter.process_image(image.copy(), median_kernel, bilateral_kernel, bilateral_color)
        self.resolution_levels[i].image = filtered_image
        self.resolution_levels[i].default_image = image.copy()
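The loop above relies on MultiResolutionFramework.downsample_image, which is not shown in this excerpt. A minimal sketch of what such a Gaussian-pyramid step could look like with OpenCV follows; this is an assumption for illustration, not the project's actual implementation.

import cv2

class MultiResolutionFramework(object):
    # ... remainder of the class omitted ...

    @staticmethod
    def downsample_image(image):
        # One Gaussian pyramid level: low-pass filter the image and halve
        # its width and height before the per-level filtering is applied.
        return cv2.pyrDown(image)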
def train(self):
    '''
    Train the landmark model and prepare images for every level of the Gaussian pyramid.
    '''
    if Config.use_file_cache:
        success = True
        for i, level in enumerate(self.resolution_levels):
            success = success and level.landmark_model.load_from_file(str(i))
        if success:
            return

    # Iterate all radiographs one by one to save memory
    for r, radiograph in enumerate(self.data_manager.radiographs):
        image = radiograph.image
        teeth = self.data_manager.get_all_teeth_from_radiograph(radiograph, True)

        # Crop image to region of interest and translate all teeth into the cropped region
        crop_translation = -Filter.get_cropping_region(image).left_top
        image = Filter.crop_image(image)
        for tooth in teeth:
            tooth.translate(crop_translation)

        for i in range(0, self.levels_count):
            resolution_level = self.resolution_levels[i]
            assert isinstance(resolution_level, ResolutionLevel)

            # Downsample the image if needed and update teeth parameters
            if i > 0:
                image, teeth = MultiResolutionFramework.downsample(image, teeth)

            # Create a copy of the image so the original is not modified
            filtered_image = image.copy()
            assert isinstance(filtered_image, np.ndarray)

            # Filter the image
            median_kernel, bilateral_kernel, bilateral_color = MultiResolutionFramework.get_filter_presets(i)
            filtered_image = Filter.process_image(filtered_image, median_kernel, bilateral_kernel, bilateral_color)

            # Add new data to the training set for the given resolution
            resolution_level.landmark_model.add_training_data(teeth, filtered_image)
            print "#Training level %d done" % (i + 1)

        print "###Training radiograph %d done" % (r + 1)

    # Finish training at all levels
    for i, resolution_level in enumerate(self.resolution_levels):
        resolution_level.landmark_model.finish_training()
        resolution_level.landmark_model.save_to_file(str(i))
def __init__(self):
    """ Init the Detector. Will set extractors, NER models, filter, checker and dataframe"""
    # NER models
    self.model = spacy.load("en_core_web_lg")
    self.ner = AlbertNER(os.path.join(MODELS_PATH, "conll03"))

    # Check data with movie database
    df_movies = pd.read_csv(os.path.join(ASSETS_PATH, "movies.csv"))
    df_movies = df_movies.loc[df_movies.actors.notna()]
    self.df_movies = df_movies

    # Extractors
    self.award_extractor = AwardsExtractor(df_movies)
    self.genre_extractor = GenreExtractor(df_movies)
    self.person_extractor = PersonExtractor(df_movies)
    self.rate_extractor = RateExtractor(df_movies)
    self.song_extractor = SongExtractor(df_movies)
    self.title_extractor = TitleExtractor(df_movies)
    self.trailer_extractor = TrailerExtractor(df_movies)
    self.year_extractor = YearExtractor(df_movies)
    self.extractors = [
        self.award_extractor, self.genre_extractor, self.person_extractor,
        self.rate_extractor, self.song_extractor, self.title_extractor,
        self.trailer_extractor, self.year_extractor
    ]

    # Filter
    self.filter = Filter()

    # Checker
    self.checker = Checker(self.filter, df_movies)
class TestFilter(unittest.TestCase):
    def setUp(self):
        self.T = 0.001   # Filter update period
        self.Tf = 3      # Filter time constant
        self.filter = Filter(self.T, self.Tf)

    def test_filter_response(self):
        input = 100

        # Number of calls needed for one filter time constant (tau) to pass
        callsPerTau = int(self.Tf / self.T)

        # Expected values after 1, 2 and 3 tau:
        # the step response reaches ~63%, ~86% and ~95% of the input
        expectedOutputs = [63, 86, 95]
        expectedError = 1

        for i in range(3):
            actual = self._loopAndReturn(callsPerTau, input)
            expected = expectedOutputs[i]
            self._assert(actual, expected, expectedError)

    def _loopAndReturn(self, numTimes, value):
        for _ in range(numTimes):
            output = self.filter.calculateOutput(value)
        return output

    def _assert(self, actual, expected, maxError):
        expectedMin = expected - maxError
        expectedMax = expected + maxError
        assert expectedMin <= actual <= expectedMax, \
            f"expected {expectedMin}-{expectedMax}, actual {actual}"
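A minimal sketch of a first-order low-pass Filter that would satisfy the step-response test above. This is an assumption for illustration; only the Filter(T, Tf) constructor and the calculateOutput name are taken from the test, and the class actually under test may be implemented differently.

class Filter:
    """Discrete first-order low-pass filter: y += (T / Tf) * (x - y)."""

    def __init__(self, T, Tf):
        self.T = T          # update period in seconds
        self.Tf = Tf        # filter time constant in seconds
        self.output = 0.0

    def calculateOutput(self, value):
        # Each call advances the filter by one period T; after Tf / T calls
        # with a constant input, the output reaches about 63% of that input,
        # matching the expected values in the test.
        self.output += (self.T / self.Tf) * (value - self.output)
        return self.output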
def _open_radiograph(self):
    file_dialog = QFileDialog(self)
    file_dialog.setDirectory("./data/Radiographs")
    file_dialog.setFileMode(QFileDialog.ExistingFile)
    file_dialog.setNameFilter("Radiograph (*.tif)")
    if file_dialog.exec_() and len(file_dialog.selectedFiles()) == 1:
        radiograph = Radiograph()
        radiograph.path_to_img = file_dialog.selectedFiles()[0]
        # self.image = radiograph.image
        # crop_translation = -Filter.get_cropping_region(radiograph.image).left_top
        self.image = Filter.crop_image(radiograph.image)
        self.lines = None
        self._redraw()
def _open_radiograph(self):
    file_dialog = QFileDialog(self)
    file_dialog.setDirectory("./data/Radiographs")
    file_dialog.setFileMode(QFileDialog.ExistingFile)
    file_dialog.setNameFilter("Radiograph (*.tif)")
    if file_dialog.exec_() and len(file_dialog.selectedFiles()) == 1:
        if self.animator is not None:
            self.animator.stop()

        radiograph = Radiograph()
        radiograph.path_to_img = file_dialog.selectedFiles()[0]
        self.image = radiograph.image
        self.radiograph_image = Filter.crop_image(radiograph.image)
        self.cached_init_poses = None
        self._redraw(self.active_shape_model.current_tooth)
def trainGrayLevelModelForAllPointsAllExamples(self, k, resolutionLevel, filter_settings):
    """
    Calculates the gray-level vectors for all points of all teeth in all provided examples.
    """
    radiographs = self.completeDataHandler.getRadiographs(deepCopy=True)
    g_all = list()
    for radiograph in radiographs:
        # Scale image to current resolution level
        for i in range(resolutionLevel):
            radiograph.downScale()

        # Pre-process image
        img = Filter.process_image(deepcopy(radiograph.getImage(deepCopy=True)),
                                   filter_settings[0], filter_settings[1], filter_settings[2])
        derivate_img = Filter.laplacian(img)
        # derivate_img = Filter.histogramEql(Filter.process_image(deepcopy(img), filter_settings[0], filter_settings[1], filter_settings[2]))
        # derivate_img = Filter.process_image(deepcopy(derivate_img), median_kernel=3, bilateral_kernel=5)

        g_ex = self.trainGrayLevelModelForAllPointsOneExample(img, derivate_img, radiograph, k)
        g_all.append(g_ex)

    return np.array(g_all)
def find_jaw_divider(self):
    self.y_top_line, self.y_lower_line = self.pose_model._find_jaw_separation_line(
        self.pose_model._crop_image_sides(self.image))
    upper_jaw_image = self.pose_model.crop_upper_jaw(self.image, self.y_top_line)
    lower_jaw_image = self.pose_model.crop_lower_jaw(self.image, self.y_lower_line)

    # Filter the image
    upper_jaw_image = Filter.process_image(upper_jaw_image, median_kernel=5, bilateral_kernel=17, bilateral_color=6)
    lower_jaw_image = Filter.process_image(lower_jaw_image, median_kernel=5, bilateral_kernel=17, bilateral_color=6)

    upper_jaw_image = self.pose_model._convert_to_binary_image(upper_jaw_image)
    lower_jaw_image = self.pose_model._convert_to_binary_image(lower_jaw_image)

    upper_lines = self.pose_model._find_hough_lines(upper_jaw_image, threshold=15)
    lower_lines = self.pose_model._find_hough_lines(lower_jaw_image, threshold=15)

    # Filter out lines
    upper_lines = self.pose_model._filter_lines(upper_lines, upper_jaw_image.shape, line_offset=6, max_line_gap=90)
    lower_lines = self.pose_model._filter_lines(lower_lines, lower_jaw_image.shape, line_offset=2, max_line_gap=60)

    self.image = lower_jaw_image
    self.lines = lower_lines
    self._redraw()
def __init__(self, data_manager, pca):
    super(FitterDialog, self).__init__()
    self.setupUi(self)
    self.setAttribute(Qt.WA_DeleteOnClose)

    assert isinstance(data_manager, DataManager)
    self.data_manager = data_manager
    self.pca = pca
    self.active_shape_model = ActiveShapeModel(self.data_manager, self.pca)
    self.initial_pose_model = InitialPoseModel(self.data_manager)

    self.scene = InteractiveGraphicsScene()
    self.graphicsView.setScene(self.scene)
    self.scene.clicked.connect(self._set_position)

    self.image = self.data_manager.radiographs[0].image
    self.radiograph_image = Filter.crop_image(self.data_manager.radiographs[0].image)

    self.openButton.clicked.connect(self._open_radiograph)
    self.exportButton.clicked.connect(self._export_result)
    self.exportButton.setEnabled(False)

    self.zoomSlider.setRange(-10, 10)
    self.zoomSlider.setValue(self.current_scale)
    self.zoomSlider.valueChanged.connect(self.change_scale)

    self.levelSlider.setRange(1, MultiResolutionFramework.levels_count)
    self.levelSlider.setValue(self.current_sampling_level + 1)
    self.levelSlider.valueChanged.connect(self.user_change_level)

    self.stepButton.clicked.connect(self._perform_one_step_asm)
    self.animateButton.clicked.connect(self._animator_entry)

    self._scales = np.empty(self.pca.eigen_values.shape)
    for i, deviation in enumerate(self.pca.get_allowed_deviation()):
        slider = QSlider(Qt.Horizontal, self.paramsScrollAreaContents)
        slider.setRange(-self.slider_resolution, self.slider_resolution)
        # slider.valueChanged.connect(self.slider_moved)
        self.paramsScrollAreaContents.layout().addWidget(slider)
        self._scales[i] = deviation / self.slider_resolution

    self.show_sampled_positions = self.sampledPositionsCheckBox.isChecked()
    self.sampledPositionsCheckBox.stateChanged.connect(self.change_show_positions)

    self.startingPoseSpinBox.setMaximum(len(self.data_manager.selector) - 1)

    self._redraw(self.active_shape_model.current_tooth)
def __init__(self, data_manager):
    super(InitialPoseDialog, self).__init__()
    self.setupUi(self)
    self.setAttribute(Qt.WA_DeleteOnClose)

    assert isinstance(data_manager, DataManager)
    self.data_manager = data_manager
    self.pose_model = InitialPoseModel(data_manager)

    self.scene = QGraphicsScene()
    self.graphicsView.setScene(self.scene)

    self.image = Filter.crop_image(self.data_manager.radiographs[0].image)

    self.findButton.clicked.connect(self.find_jaw_divider)
    self.openButton.clicked.connect(self._open_radiograph)

    self._redraw()
def main():
    random.seed()
    colorama.init()
    print_timeless(COLOR_HEADER + "Insomniac " + get_version() + "\n" + COLOR_ENDC)

    ok, args = _parse_arguments()
    if not ok:
        return

    global device_id
    device_id = args.device
    if not check_adb_connection(is_device_id_provided=(device_id is not None)):
        return

    print("Instagram version: " + get_instagram_version())

    device = create_device(args.old, device_id)
    if device is None:
        return

    if len(args.interact) > 0 and args.interaction_users_amount:
        args.__setattr__("full_interact", args.interact.copy())

    while True:
        mode = None
        is_interact_enabled = len(args.interact) > 0
        is_unfollow_enabled = args.unfollow is not None
        is_unfollow_non_followers_enabled = args.unfollow_non_followers is not None
        is_unfollow_any_enabled = args.unfollow_any is not None
        is_remove_mass_followers_enabled = args.remove_mass_followers is not None and \
            int(args.remove_mass_followers) > 0
        total_enabled = int(is_interact_enabled) + int(is_unfollow_enabled) + int(is_unfollow_non_followers_enabled) \
            + int(is_unfollow_any_enabled) + int(is_remove_mass_followers_enabled)
        if total_enabled == 0:
            print_timeless(COLOR_FAIL + "You have to specify one of the actions: --interact, --unfollow, "
                                        "--unfollow-non-followers, --unfollow-any, --remove-mass-followers" + COLOR_ENDC)
            return
        elif total_enabled > 1:
            print_timeless(COLOR_FAIL + "Running Insomniac with two or more actions is not supported yet." + COLOR_ENDC)
            return
        else:
            if is_interact_enabled:
                print("Action: interact with @" + ", @".join(str(blogger) for blogger in args.interact))
                mode = Mode.INTERACT
            elif is_unfollow_enabled:
                print("Action: unfollow " + str(args.unfollow))
                mode = Mode.UNFOLLOW
            elif is_unfollow_non_followers_enabled:
                print("Action: unfollow " + str(args.unfollow_non_followers) + " non followers")
                mode = Mode.UNFOLLOW_NON_FOLLOWERS
            elif is_unfollow_any_enabled:
                print("Action: unfollow any " + str(args.unfollow_any))
                mode = Mode.UNFOLLOW_ANY
            elif is_remove_mass_followers_enabled:
                print("Action: remove " + str(args.remove_mass_followers) + " mass followers")
                mode = Mode.REMOVE_MASS_FOLLOWERS

        profile_filter = Filter(args.filters)

        start_work_hour, stop_work_hour = 1, 24
        if args.working_hours:
            start_work_hour, stop_work_hour = get_left_right_values(args.working_hours, "Working hours {}", (9, 21))
            if not (1 <= start_work_hour <= 24):
                print(COLOR_FAIL + "Working-hours left-boundary ({0}) is not valid. "
                                   "Using (9) instead".format(start_work_hour) + COLOR_ENDC)
                start_work_hour = 9
            if not (1 <= stop_work_hour <= 24):
                print(COLOR_FAIL + "Working-hours right-boundary ({0}) is not valid. "
                                   "Using (21) instead".format(stop_work_hour) + COLOR_ENDC)
                stop_work_hour = 21

        now = datetime.now()
        if not (start_work_hour <= now.hour <= stop_work_hour):
            print("Current time: {0}, which is out of the working-time range ({1}-{2})"
                  .format(now.strftime("%H:%M:%S"), start_work_hour, stop_work_hour))
            next_execution = '0 {0} * * *'.format(start_work_hour)
            time_to_sleep_seconds = (croniter(next_execution, now).get_next(datetime) - now).seconds + 60
            print("Going to sleep until working time ({0} minutes)...".format(time_to_sleep_seconds / 60))
            sleep(time_to_sleep_seconds)
            continue

        if len(args.full_interact) > 0 and args.interaction_users_amount:
            args.interact = args.full_interact.copy()
            users_amount = get_value(args.interaction_users_amount, "Interaction user amount {}", 100)

            if users_amount >= len(args.interact):
                print("interaction-users-amount parameter is equal to or higher than the users-interact list. "
                      "Choosing the whole list for interaction.")
            else:
                amount_to_remove = len(args.interact) - users_amount
                for i in range(0, amount_to_remove):
                    args.interact.remove(random.choice(args.interact))

        session_state = SessionState()
        session_state.args = args.__dict__
        sessions.append(session_state)

        print_timeless(COLOR_WARNING + "\n-------- START: " + str(session_state.startTime) + " --------" + COLOR_ENDC)
        open_instagram(device_id)
        session_state.my_username, \
            session_state.my_followers_count, \
            session_state.my_following_count = get_my_profile_info(device)
        storage = Storage(session_state.my_username)

        # IMPORTANT: in each job we assume being on the top of the Profile tab already
        if mode == Mode.INTERACT:
            on_interaction = partial(_on_interaction, likes_limit=int(args.total_likes_limit))
            _job_handle_bloggers(device,
                                 args.interact,
                                 args.likes_count,
                                 int(args.follow_percentage),
                                 int(args.follow_limit) if args.follow_limit else None,
                                 int(args.total_follow_limit) if args.total_follow_limit else None,
                                 storage,
                                 profile_filter,
                                 args.interactions_count,
                                 on_interaction)
        elif mode == Mode.UNFOLLOW:
            print_timeless("")
            _job_unfollow(device,
                          get_value(args.unfollow, "Unfollow {}", 100),
                          storage,
                          int(args.min_following),
                          UnfollowRestriction.FOLLOWED_BY_SCRIPT)
        elif mode == Mode.UNFOLLOW_NON_FOLLOWERS:
            print_timeless("")
            _job_unfollow(device,
                          get_value(args.unfollow_non_followers, "Unfollow {} non followers", 100),
                          storage,
                          int(args.min_following),
                          UnfollowRestriction.FOLLOWED_BY_SCRIPT_NON_FOLLOWERS)
        elif mode == Mode.UNFOLLOW_ANY:
            print_timeless("")
            _job_unfollow(device,
                          get_value(args.unfollow_any, "Unfollow {} any", 100),
                          storage,
                          int(args.min_following),
                          UnfollowRestriction.ANY)
        elif mode == Mode.REMOVE_MASS_FOLLOWERS:
            _job_remove_mass_followers(device, int(args.remove_mass_followers), int(args.max_following), storage)

        close_instagram(device_id)
        print_copyright(session_state.my_username)
        session_state.finishTime = datetime.now()
        print_timeless(COLOR_WARNING + "-------- FINISH: " + str(session_state.finishTime) + " --------" + COLOR_ENDC)

        if args.repeat:
            print_full_report(sessions)
            print_timeless("")
            repeat = get_value(args.repeat, "Sleep for {} minutes", 180)
            try:
                sleep(60 * repeat)
                _refresh_args_by_conf_file(args)
            except KeyboardInterrupt:
                print_full_report(sessions)
                sessions.persist(directory=session_state.my_username)
                sys.exit(0)
        else:
            break

    print_full_report(sessions)
    sessions.persist(directory=session_state.my_username)
def main():
    colorama.init()
    print_timeless(COLOR_HEADER + "Insomniac " + get_version() + "\n" + COLOR_ENDC)

    ok, args = _parse_arguments()
    if not ok:
        return

    global device_id
    device_id = args.device
    device = uiautomator.device if device_id is None else uiautomator.Device(device_id)
    if not check_adb_connection(is_device_id_provided=(device_id is not None)):
        return

    mode = None
    is_interact_enabled = len(args.interact) > 0
    is_unfollow_enabled = int(args.unfollow) > 0
    is_unfollow_non_followers_enabled = int(args.unfollow_non_followers) > 0
    total_enabled = int(is_interact_enabled) + int(is_unfollow_enabled) + int(is_unfollow_non_followers_enabled)
    if total_enabled == 0:
        print_timeless(COLOR_FAIL + "You have to specify one of the actions: --interact, --unfollow, "
                                    "--unfollow-non-followers" + COLOR_ENDC)
        return
    elif total_enabled > 1:
        print_timeless(COLOR_FAIL + "Running Insomniac with two or more actions is not supported yet." + COLOR_ENDC)
        return
    else:
        if is_interact_enabled:
            print("Action: interact with @" + ", @".join(str(blogger) for blogger in args.interact))
            mode = Mode.INTERACT
        elif is_unfollow_enabled:
            print("Action: unfollow " + str(args.unfollow))
            mode = Mode.UNFOLLOW
        elif is_unfollow_non_followers_enabled:
            print("Action: unfollow " + str(args.unfollow_non_followers) + " non followers")
            mode = Mode.UNFOLLOW_NON_FOLLOWERS

    profile_filter = Filter()
    on_interaction = partial(_on_interaction,
                             interactions_limit=int(args.interactions_count),
                             likes_limit=int(args.total_likes_limit))

    while True:
        session_state = SessionState()
        sessions.append(session_state)

        print_timeless(COLOR_WARNING + "\n-------- START: " + str(session_state.startTime) + " --------" + COLOR_ENDC)
        open_instagram(device_id)
        session_state.my_username = get_my_username(device)
        storage = Storage(session_state.my_username)

        # IMPORTANT: in each job we assume being on the top of the Profile tab already
        if mode == Mode.INTERACT:
            _job_handle_bloggers(device,
                                 args.interact,
                                 int(args.likes_count),
                                 int(args.follow_percentage),
                                 storage,
                                 profile_filter,
                                 on_interaction)
        elif mode == Mode.UNFOLLOW:
            _job_unfollow(device, int(args.unfollow), storage, only_non_followers=False)
        elif mode == Mode.UNFOLLOW_NON_FOLLOWERS:
            _job_unfollow(device, int(args.unfollow_non_followers), storage, only_non_followers=True)

        close_instagram(device_id)
        print_copyright(session_state.my_username)
        session_state.finishTime = datetime.now()
        print_timeless(COLOR_WARNING + "-------- FINISH: " + str(session_state.finishTime) + " --------" + COLOR_ENDC)

        if args.repeat:
            _print_report()
            repeat = int(args.repeat)
            print_timeless("")
            print("Sleep for " + str(repeat) + " minutes")
            try:
                sleep(60 * repeat)
            except KeyboardInterrupt:
                _print_report()
                sys.exit(0)
        else:
            break

    _print_report()
def on_status(self, status):
    nyan = Filter()
    # Replace "shiotomohack" here with your own username
    if str(status.user.screen_name) == "shiotomohack":
        nyan.nyan_filter(status)
parser = argparse.ArgumentParser(description='Analyze non-trivial chess moves.')
parser.add_argument('-e', '--config',
                    help='path to UCI Server configuration file (relative or absolute)',
                    default="uciServer.json", type=str)
# Can't be -h due to conflicts (Why?) TODO: fix
parser.add_argument('-hd', '--headers',
                    help='what headers should be put to output PGN file (all, concise, minimal)',
                    default="minimal", type=str)
parser.add_argument('-cp', '--centipawns',
                    help='min. required cp (centipawns) difference between best and second best move shown by the engine',
                    default=50, type=int)
parser.add_argument('-d', '--depth',
                    help='min engine search depth for best and second best move shown by the engine (in multivariation mode)',
                    default=30, type=int)
parser.add_argument('-n', '--variations-number',
                    help='number of variations in multi-variation mode',
                    default=2, type=int)
parser.add_argument('args', nargs='*')  # Input and output .PGN file paths

args = parser.parse_args()
input_pgn_path = args.args[0] if len(args.args) > 0 else None
output_pgn_path = args.args[1] if len(args.args) > 1 else None

games = Game(input_pgn_path).games
filter = Filter()
saver = Saver(output_pgn_path)
time = calculate_time_for_messenger(args.depth)

if os.path.exists(args.config):
    with open(args.config) as json_file:
        config = json.load(json_file)

for game in games:
    board = game.board()
    headers = get_headers(game, args)
    with Communicator(config) as communicator:
        for move in game.mainline_moves():
            filter.evaluate_position(move, game, board, args, communicator, time)
            if filter.pass_filters(move, game, board, args, communicator, time):
                saver.save(board.fen(), filter.moves, filter.evaluations, filter.played, headers)
def main():
    random.seed()
    colorama.init()
    print_timeless(COLOR_HEADER + "Insomniac " + get_version() + "\n" + COLOR_ENDC)

    ok, args = _parse_arguments()
    if not ok:
        return

    global device_id
    device_id = args.device
    if not check_adb_connection(is_device_id_provided=(device_id is not None)):
        return

    device = create_device(args.old, device_id)
    if device is None:
        return

    mode = None
    is_interact_enabled = len(args.interact) > 0
    is_unfollow_enabled = args.unfollow is not None
    is_unfollow_non_followers_enabled = args.unfollow_non_followers is not None
    is_unfollow_any_enabled = args.unfollow_any is not None
    is_remove_mass_followers_enabled = args.remove_mass_followers is not None and \
        int(args.remove_mass_followers) > 0
    total_enabled = int(is_interact_enabled) + int(is_unfollow_enabled) + int(is_unfollow_non_followers_enabled) \
        + int(is_unfollow_any_enabled) + int(is_remove_mass_followers_enabled)
    if total_enabled == 0:
        print_timeless(COLOR_FAIL + "You have to specify one of the actions: --interact, --unfollow, "
                                    "--unfollow-non-followers, --unfollow-any, --remove-mass-followers" + COLOR_ENDC)
        return
    elif total_enabled > 1:
        print_timeless(COLOR_FAIL + "Running Insomniac with two or more actions is not supported yet." + COLOR_ENDC)
        return
    else:
        if is_interact_enabled:
            print("Action: interact with @" + ", @".join(str(blogger) for blogger in args.interact))
            mode = Mode.INTERACT
        elif is_unfollow_enabled:
            print("Action: unfollow " + str(args.unfollow))
            mode = Mode.UNFOLLOW
        elif is_unfollow_non_followers_enabled:
            print("Action: unfollow " + str(args.unfollow_non_followers) + " non followers")
            mode = Mode.UNFOLLOW_NON_FOLLOWERS
        elif is_unfollow_any_enabled:
            print("Action: unfollow any " + str(args.unfollow_any))
            mode = Mode.UNFOLLOW_ANY
        elif is_remove_mass_followers_enabled:
            print("Action: remove " + str(args.remove_mass_followers) + " mass followers")
            mode = Mode.REMOVE_MASS_FOLLOWERS

    profile_filter = Filter()

    while True:
        session_state = SessionState()
        session_state.args = args.__dict__
        sessions.append(session_state)

        print_timeless(COLOR_WARNING + "\n-------- START: " + str(session_state.startTime) + " --------" + COLOR_ENDC)
        open_instagram(device_id)
        session_state.my_username, \
            session_state.my_followers_count, \
            session_state.my_following_count = get_my_profile_info(device)
        storage = Storage(session_state.my_username)

        # IMPORTANT: in each job we assume being on the top of the Profile tab already
        if mode == Mode.INTERACT:
            on_interaction = partial(_on_interaction, likes_limit=int(args.total_likes_limit))
            _job_handle_bloggers(device,
                                 args.interact,
                                 args.likes_count,
                                 int(args.follow_percentage),
                                 int(args.follow_limit) if args.follow_limit else None,
                                 storage,
                                 profile_filter,
                                 args.interactions_count,
                                 on_interaction)
        elif mode == Mode.UNFOLLOW:
            print_timeless("")
            _job_unfollow(device,
                          get_value(args.unfollow, "Unfollow {}", 100),
                          storage,
                          int(args.min_following),
                          UnfollowRestriction.FOLLOWED_BY_SCRIPT)
        elif mode == Mode.UNFOLLOW_NON_FOLLOWERS:
            print_timeless("")
            _job_unfollow(device,
                          get_value(args.unfollow_non_followers, "Unfollow {} non followers", 100),
                          storage,
                          int(args.min_following),
                          UnfollowRestriction.FOLLOWED_BY_SCRIPT_NON_FOLLOWERS)
        elif mode == Mode.UNFOLLOW_ANY:
            print_timeless("")
            _job_unfollow(device,
                          get_value(args.unfollow_any, "Unfollow {} any", 100),
                          storage,
                          int(args.min_following),
                          UnfollowRestriction.ANY)
        elif mode == Mode.REMOVE_MASS_FOLLOWERS:
            _job_remove_mass_followers(device, int(args.remove_mass_followers), int(args.max_following), storage)

        close_instagram(device_id)
        print_copyright(session_state.my_username)
        session_state.finishTime = datetime.now()
        print_timeless(COLOR_WARNING + "-------- FINISH: " + str(session_state.finishTime) + " --------" + COLOR_ENDC)

        if args.repeat:
            print_full_report(sessions)
            print_timeless("")
            repeat = get_value(args.repeat, "Sleep for {} minutes", 180)
            try:
                sleep(60 * repeat)
            except KeyboardInterrupt:
                print_full_report(sessions)
                sessions.persist(directory=session_state.my_username)
                sys.exit(0)
        else:
            break

    print_full_report(sessions)
    sessions.persist(directory=session_state.my_username)