def process_ascii_art(self, destination):
    """Convert the source image to ASCII art, yielding one completed row at a time.

    Glue function: scales the PIL Image for the chosen destination, computes a
    brightness value per pixel, and maps each pixel to an ASCII character. The
    output is consumed row-by-row by print_to_terminal() or print_to_file().

    :param destination: 'terminal' to scale for the terminal window; anything
        else scales for a printed page (8.5 x 11 assumed).
    :yields: one string per image row of ASCII characters.
    """
    # Scale image for output. x/y calibration compensates for non-square
    # character cells.
    if destination == 'terminal':
        # Output to terminal
        scale = self.scale_for_terminal()
    else:
        # Output to file (8.5 X 11 assumed)
        scale = self.scale_for_page()
    new_width = self.image.width // (self.x_calibrate * scale)
    new_height = self.image.height // (self.y_calibrate * scale)

    # Create resized Image instance to process.
    scaled_image = self.image.resize((int(new_width), int(new_height)))

    # Initiate brightness calc object
    bc = Brightness(self.brightness_calc)

    # Compute each pixel's brightness exactly once (the original re-ran
    # bc.calc over all pixels three times: min pass, max pass, mapping pass).
    brightness_values = [bc.calc(pixel) for pixel in scaled_image.getdata()]
    min_brightness = min(brightness_values)
    brightness_range = max(brightness_values) - min_brightness

    # Build ascii_art pixel-to-char rows.
    # BUG FIX: the original tested `i % width - 1 == 0`, which (by operator
    # precedence) broke rows at column 1, dropped the pixel at every break
    # index, and never yielded the final row. Now every pixel is mapped and
    # a row is emitted exactly when it holds one char per pixel column.
    ascii_row = []
    for i, value in enumerate(brightness_values):
        adjusted_brightness = value - min_brightness
        ascii_row.append(
            self.brightness_to_char(adjusted_brightness, brightness_range))
        if (i + 1) % scaled_image.width == 0:
            yield ''.join(ascii_row)
            ascii_row = []
def should_have_executed(monkeypatch) -> None:
    """execute() runs the brightness command and returns its output."""
    mock_parse_argv(MODULE_NAME, 'Brightness', monkeypatch)
    mute_logs(MODULE_NAME, monkeypatch)
    brightness_level = str(random.random())
    expected_result = next_alphanumeric(10)

    def fake_execute_cmd(*args: tuple, **kwargs: dict) -> str:
        # The command must carry the level that was assigned below.
        assert args[0] == ['brightness', brightness_level]
        return expected_result

    monkeypatch.setattr(f"{MODULE_NAME}.execute_cmd", fake_execute_cmd)

    automation = Brightness()
    automation.brightness = brightness_level
    assert automation.execute() == expected_result
def __init__(self):
    """Instantiate one controller per tunable camera parameter and index them."""
    self.brightness = Brightness()
    self.gamma = Gamma()
    self.gain = Gain()
    self.exposure = Exposure()
    self.exposureauto = ExposureAuto()
    self.saturation = Saturation()
    self.hue = Hue()
    self.whitebalancered = WhiteBalanceRed()
    self.whitebalanceblue = WhiteBalanceBlue()
    # Map each parameter name to the same-named node on its controller.
    # Each attribute above is named as the lower-cased parameter name.
    self.Parameters = {
        name: getattr(getattr(self, name.lower()), name)
        for name in (
            'Brightness', 'Gamma', 'Gain', 'Exposure', 'ExposureAuto',
            'Saturation', 'Hue', 'WhiteBalanceRed', 'WhiteBalanceBlue',
        )
    }
def should_have_printed_usage_instructions(monkeypatch) -> None:
    """usage() emits exactly two coloured output lines."""
    recorded_calls = []
    mock_parse_argv(MODULE_NAME, 'Brightness', monkeypatch)
    monkeypatch.setattr(
        f"{MODULE_NAME}.print_coloured",
        lambda *args, **kwargs: recorded_calls.append(''),
    )
    Brightness().usage()
    assert len(recorded_calls) == 2
def handle_color_slider_change(self, sender):
    """Refresh the colour preview label whenever an RGB slider moves."""
    red = self.cpr.value
    green = self.cpg.value
    blue = self.cpb.value
    color = (red / 255, green / 255, blue / 255, 1)
    self.color = color
    self.demo_lbl.background_color = color
    # Choose a readable text colour against the new background.
    if Brightness.is_light(color[0], color[1], color[2]):
        self.demo_lbl.text_color = 'black'
    else:
        self.demo_lbl.text_color = 'white'
    self.demo_lbl.text = self.get_hex_color(red, green, blue)
def should_not_have_parsed_argv_with_wrong_or_no_option(
        monkeypatch, argv: List[str]) -> None:
    """Bad or missing options must route through raise_error() with usage text."""
    def fake_raise_error(*args: tuple, **kwargs: dict) -> None:
        assert kwargs['usage'] is not None
        raise SystemExit(0)  # Controlled early exit

    monkeypatch.setattr(f"{MODULE_NAME}.raise_error", fake_raise_error)
    with pytest.raises(SystemExit) as exc_info:
        Brightness(argv)
    assert exc_info.type == SystemExit
    assert exc_info.value.code == 0
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from libqtile.config import Key, Screen, Group, Drag, Click from libqtile.command import lazy from libqtile import layout, bar, widget, hook from brightness import Brightness import subprocess, re brightness = Brightness() mod = "mod4" font = 'Ubuntu Mono' foreground = '#BBBBBB' alert = "#FFFF00" fontsize = 16 font_params = { 'font': font, 'fontsize': fontsize, 'foreground': foreground, } keys = [
def should_have_parsed_argv(argv: List[str],
                            expected_brightness_level: float) -> None:
    """argv is stored verbatim and the brightness level is parsed from it."""
    automation = Brightness(argv)
    assert automation.argv == argv
    assert automation.brightness == expected_brightness_level
import time
from args_helper import Args
from csv_helper import read_file
from brightness import Brightness
from gui import ForegroundGUI
from event import ForegroundEvent

# Module-level brightness controller shared by the callbacks below.
brightness = Brightness()
# Maps window name -> desired brightness level.
# NOTE(review): populated elsewhere (presumably from the CSV helper) — confirm.
global_desired_dict = {}


def window_check(current_window, current_brightness):
    # Apply the brightness recorded for this window, or fall back to the
    # default level when the window has no recorded entry.
    # NOTE(review): `args` is a module global assumed to be set before the
    # hook fires — confirm against the entry point.
    if current_window in global_desired_dict:
        brightness_true = global_desired_dict[current_window]
        # Only touch the backlight when the level actually differs.
        if current_brightness != brightness_true:
            brightness.set_brightness(brightness_true)
            if args.verbose:
                print(
                    'Same window name detected. Window name: "{}" | Brightness: {}'
                    .format(current_window, brightness_true))
    else:
        if args.verbose:
            print(
                '"{}" does not exist. Setting brightness to default {}'.format(
                    current_window, args.default_brightness))
        brightness.set_brightness(args.default_brightness)


def hook_callback(current_window):
    # Trace the window name delivered by the foreground-window hook.
    if args.verbose:
        print("Window returned from hook: {}".format(current_window))
fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST) # Gray scale gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) return frame, gray if __name__ == '__main__': webcam = cv2.VideoCapture(0) face_cascade = cv2.CascadeClassifier( './haar/haarcascade_frontalface_alt2.xml') eye_cascade = cv2.CascadeClassifier('./haar/haarcascade_eye.xml') original_size = None maximum_size = 0 screen = Brightness() original_bright = screen.get_brightness() current_bright = None while True: # Read from webcam got_frame, original_frame = webcam.read() if got_frame: frame, gray = reduce_gray_image(original_frame) # Detect faces faces = face_cascade.detectMultiScale(gray, SCALE_FACTOR, MIN_NEIGHBOR) bright = current_bright for (fx, fy, fw, fh) in faces: if DEBUG: cv2.rectangle(frame, (fx, fy), (fx + fw, fy + fh), BLUE, 2)
def setter(arguments, user):
    """Handle a 'set' command for volume, brightness, or alarm.

    arguments[0] names the target ('volume', 'brightness', 'alarm');
    arguments[1] is the desired setting (a keyword or a numeric string).
    Returns a confirmation string containing the resulting value.
    """
    print(arguments)  # debug trace of the raw command arguments
    obj = arguments[0]
    setting = arguments[1]
    curr = None

    if obj == "volume":
        volume_actions = {
            "up": Audio.volume_up,
            "down": Audio.volume_down,
            "full": Audio.volume_max,
            "mute": Audio.mute,
            "off": Audio.mute,
            # NOTE(review): 'unmute' invokes Audio.mute() in the original
            # code — looks like a bug unless mute() toggles; confirm the
            # Audio API before changing.
            "unmute": Audio.mute,
        }
        action = volume_actions.get(setting)
        if action is None:
            # Any other value is treated as an absolute numeric level.
            Audio.set(int(setting))
        else:
            action()
        curr = Audio.current_volume()

    if obj == "brightness":
        brightness_actions = {
            "up": Brightness.up,
            "down": Brightness.down,
            "full": Brightness.max,
            "max": Brightness.max,
            "min": Brightness.min,
        }
        action = brightness_actions.get(setting)
        if action is None:
            Brightness.set(int(setting))
        else:
            action()
        curr = Brightness.current_brightness()

    if obj == "alarm":
        Alarm.set(setting)

    # NOTE(review): for 'alarm' (and unknown targets) curr stays None, so
    # this reports "... set to None" — matches the original behaviour.
    return obj + " set to " + str(curr)