Example #1
0
 def pick_bootloader(self):
     """
     A menu to pick the bootloader from bootloaders.json.
     Also records fuse information in self.write_fuse_params,
     where it stays until user selects another bootloader
     or manually selects a file by path.
     """
     bootloader_dir = local_path("bootloaders/")
     config = read_config(
         os.path.join(bootloader_dir, self.bootloader_config_filename))
     bootloader_choices = [[bootloader["name"], bootloader]
                           for bootloader in config["bootloaders"]]
     if not bootloader_choices:
         PrettyPrinter("No bootloaders found!", self.i, self.o, 3)
         return
     choice = Listbox(bootloader_choices,
                      self.i,
                      self.o,
                      name="Avrdude bootloader picker").activate()
     if choice:
         self.write_file = os.path.join(bootloader_dir, choice["file"])
         self.write_fuse_params = []
         for type in self.fuse_types:
             if type in choice:
                 self.write_fuse_params.append(
                     [type, choice[type], config["fuse_format"]])
         self.config["last_write_file"] = self.write_file
         self.config["last_write_fuse_params"] = self.write_fuse_params
         self.save_config()
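# A hedged sketch of the bootloaders.json layout that pick_bootloader appears to
# expect: a top-level "fuse_format" plus a "bootloaders" list whose entries carry
# "name", "file" and optional per-fuse-type values. The fuse names and values below
# are purely illustrative and not taken from the original project.
import json

example_bootloaders_config = {
    "fuse_format": "0x{:02x}",
    "bootloaders": [
        {"name": "Example optiboot", "file": "optiboot_atmega328.hex",
         "lfuse": 255, "hfuse": 222},
    ],
}
print(json.dumps(example_bootloaders_config, indent=2))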
Example #2
0
def maybe_first_time_setup():
    """
    Set up the user's notes directory/folder the first time they run
    NoteBag.

    Returns False if it failed, or needs to try again; returns True if
    it succeeds, or doesn't need to happen at all.
    """

    if not os.path.isfile(get_config_path(CONFIG_FILENAME)):
        shutil.copy2(get_config_path(TEMPLATE_CONFIG_FILENAME),
                     get_config_path(CONFIG_FILENAME))

    config = read_config(CONFIG_FILENAME)
    if config.get("NoteBag", "Notes Directory"):
        return True

    if not messagebox.askokcancel(
            "NoteBag Setup",
            "Hi! It looks like this is your first time running NoteBag!\n"
            "Please choose the folder where you would like NoteBag to keep your notes."
            ):
        return False

    notes_dir = filedialog.askdirectory(title="Notes Folder")
    print(notes_dir)
    if not notes_dir:
        return False

    config.set("NoteBag", "Notes Directory", notes_dir)
    save_config(config, CONFIG_FILENAME)
    return True
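# maybe_first_time_setup calls config.get(section, option) and config.set(...),
# which matches the stdlib configparser API, so read_config/save_config are
# plausibly thin wrappers around it. A minimal sketch under that assumption;
# the real NoteBag helpers may differ:
import configparser

def read_config(filename):
    parser = configparser.ConfigParser()
    parser.read(filename)      # a missing file simply yields an empty parser
    return parser

def save_config(config, filename):
    with open(filename, "w") as config_file:
        config.write(config_file)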
Example #3
0
def gotConnection(conn, authentication):
    config = process_config(read_config(settings.gitosis_config))
    QUEUE = random_queue()

    yield conn.start(authentication)
    chan = yield conn.channel(1)
    yield chan.channel_open()

    # Initialize the MQ state
    yield chan.queue_declare(queue=QUEUE, durable=False, exclusive=False, auto_delete=True)
    yield chan.exchange_declare(exchange=config['exchange'], type="fanout",
                                durable=True, auto_delete=False)
    yield chan.queue_bind(queue=QUEUE, exchange=config['exchange'])
    yield chan.basic_consume(queue=QUEUE, no_ack=True, consumer_tag="smart")

    queue = yield conn.queue("smart")
    recv_callback = callback_wrapper(config['projects_dir'],
                                     config['git_user'],
                                     config['git_server'])
    yield (queue.get().addCallback(recv_callback, chan, queue))


    # This is all about closing the connection nicely
    yield chan.basic_cancel("smart")
    yield chan.channel_close()
    chan0 = yield conn.channel(0)
    yield chan0.connection_close()

    reactor.stop()
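# gotConnection drives txamqp deferreds with bare yields, which suggests it runs
# under twisted.internet.defer.inlineCallbacks and is attached as a ClientCreator
# callback, much like the publisher wiring shown later in Example #19. A hedged
# sketch of that wiring; the spec path, host and credentials are placeholders:
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from txamqp.client import TwistedDelegate
from txamqp.protocol import AMQClient
import txamqp.spec

spec = txamqp.spec.load("amqp0-8.xml")                    # placeholder spec file
authentication = {"LOGIN": "guest", "PASSWORD": "guest"}  # placeholder credentials
d = ClientCreator(reactor, AMQClient, delegate=TwistedDelegate(), vhost="/",
                  spec=spec).connectTCP("localhost", 5672)
d.addCallback(gotConnection, authentication)
reactor.run()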
Example #4
0
 def get_user_list(self):
     users = list(
         filter(lambda x: Constants.CONFIG_USERNAME_PREFIX in x,
                helpers.read_config().sections()))
     return list(
         map(lambda x: x.replace(Constants.CONFIG_USERNAME_PREFIX, ''),
             users))
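# get_user_list keeps only configparser sections whose names contain a username
# prefix, then strips that prefix off. A self-contained illustration; the prefix
# value and the section/option names are hypothetical:
import configparser

sample = configparser.ConfigParser()
sample.read_string("""
[global]
active_user = alice

[user_alice]
password_1 = secret

[user_bob]
password_1 = hunter2
""")
prefix = "user_"          # assumed value of Constants.CONFIG_USERNAME_PREFIX
users = [s.replace(prefix, "") for s in sample.sections() if prefix in s]
print(users)              # ['alice', 'bob']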
Example #5
0
def maybe_first_time_setup():
    """
    Set up the user's notes directory/folder the first time they run
    NoteBag.

    Returns False if it failed, or needs to try again; returns True if
    it succeeds, or doesn't need to happen at all.
    """

    if not os.path.isfile(get_config_path(CONFIG_FILENAME)):
        shutil.copy2(get_config_path(TEMPLATE_CONFIG_FILENAME),
                     get_config_path(CONFIG_FILENAME))

    config = read_config(CONFIG_FILENAME)
    if config.get("NoteBag", "Notes Directory"):
        return True

    if not messagebox.askokcancel(
            "NoteBag Setup",
            "Hi! It looks like this is your first time running NoteBag!\n"
            "Please choose the folder where you would like NoteBag to keep your notes."
    ):
        return False

    notes_dir = filedialog.askdirectory(title="Notes Folder")
    print(notes_dir)
    if not notes_dir:
        return False

    config.set("NoteBag", "Notes Directory", notes_dir)
    save_config(config, CONFIG_FILENAME)
    return True
Example #6
0
def init():
    """Initialize input and output objects"""

    global input_processor, screen, cm, config, config_path
    config = None

    # Load config
    for config_path in config_paths:
        #Only try to load the config file if it's present
        #(unclutters the logs)
        if os.path.exists(config_path):
            try:
                logging.debug('Loading config from {}'.format(config_path))
                config = read_config(config_path)
            except:
                logging.exception(
                    'Failed to load config from {}'.format(config_path))
            else:
                logging.info(
                    'Successfully loaded config from {}'.format(config_path))
                break
    # After this loop, the config_path global should contain
    # the path of the config file that loaded successfully

    if config is None:
        sys.exit('Failed to load any config files!')

    # Initialize output
    try:
        screen = output.init(config['output'])
    except:
        logging.exception('Failed to initialize the output object')
        logging.exception(traceback.format_exc())
        sys.exit(2)

    # Initialize the context manager
    cm = ContextManager()

    # Initialize input
    try:
        # Now we can show errors on the display
        input_processor = input.init(config["input"], cm)
    except:
        logging.exception('Failed to initialize the input object')
        logging.exception(traceback.format_exc())
        Printer(['Oops. :(', 'y u make mistake'], None, screen, 0)
        sys.exit(3)

    # Tying objects together
    if hasattr(screen, "set_backlight_callback"):
        screen.set_backlight_callback(input_processor)
    cm.init_io(input_processor, screen)
    cm.switch_to_context("main")
    i, o = cm.get_io_for_context("main")

    return i, o
Example #7
0
    def load_config(self):
        """
        Load NoteBag's config file, and use it to set config options.
        """

        config = self.config = read_config(CONFIG_FILENAME)
        self.notes_list_filename = config.get("NoteBag", "Notes List File")
        self.notes_dir = config.get("NoteBag", "Notes Directory")
        self.note_template_filename = config.get("NoteBag", "Note Template Filename")
        self.document_editor = config.get("NoteBag", "Document Editor")
Example #8
0
 def set_config(self, key, val):
     if key[0] == '#':
         run_command(
             f'/usr/bin/sudo /bin/sed -i "s|^#*{key[1:]}=.*|{key}={val}|" {self.hostapd_conf}'
         )
     else:
         run_command(
             f'/usr/bin/sudo /bin/sed -i "s|^#*{key}=.*|{key}={val}|" {self.hostapd_conf}'
         )
     self.config = read_config(self.hostapd_conf)
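# set_config rewrites "key=value" lines in hostapd.conf via sed (passing the key
# with a leading '#' targets a commented-out option and uncomments it), then
# re-reads the file. A hedged sketch of a key=value parser consistent with that
# file format; the project's real read_config may well differ:
def parse_hostapd_conf(path):
    options = {}
    with open(path) as conf:
        for line in conf:
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue      # skip blank lines, comments and malformed lines
            key, _, value = line.partition("=")
            options[key] = value
    return options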
Example #9
0
    def load_config(self):
        """
        Load NoteBag's config file, and use it to set config options.
        """

        config = self.config = read_config(CONFIG_FILENAME)
        self.notes_list_filename = config.get("NoteBag", "Notes List File")
        self.notes_dir = config.get("NoteBag", "Notes Directory")
        self.note_template_filename = config.get("NoteBag",
                                                 "Note Template Filename")
        self.document_editor = config.get("NoteBag", "Document Editor")
Example #10
0
def init():
    """ This function is called by main.py to read the output configuration, pick the corresponding drivers and initialize a Screen object.

    It also sets ``screen`` global of ``output`` module with created ``Screen`` object."""
    global screen
    config = read_config("config.json")
    output_config = config["output"][0]
    driver_name = output_config["driver"]
    driver_module = importlib.import_module("output.drivers." + driver_name)
    args = output_config["args"] if "args" in output_config else []
    kwargs = output_config["kwargs"] if "kwargs" in output_config else {}
    screen = driver_module.Screen(*args, **kwargs)
Example #11
0
def init():
    """ This function is called by main.py to read the output configuration, pick the corresponding drivers and initialize a Screen object.

    It also sets ``screen`` global of ``output`` module with created ``Screen`` object."""
    global screen
    config = read_config("config.json")
    output_config = config["output"][0]
    driver_name = output_config["driver"]
    driver_module = importlib.import_module("output.drivers."+driver_name)
    args = output_config["args"] if "args" in output_config else []
    kwargs = output_config["kwargs"] if "kwargs" in output_config else {}
    screen = driver_module.Screen(*args, **kwargs)
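# Both Screen-initializing init() variants above index config["output"][0] and pull
# "driver", "args" and "kwargs" from it, so config.json presumably holds a list of
# output driver entries. A hypothetical fragment consistent with that; the driver
# name and keyword arguments are made up:
example_output_config = {
    "output": [
        {"driver": "example_driver", "args": [], "kwargs": {"rows": 2, "cols": 16}}
    ]
}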
Example #12
0
def init():
    """Initialize input and output objects"""

    global config, config_path

    # Load config
    for config_path in config_paths:
        #Only try to load the config file if it's present
        #(unclutters the logs)
        if os.path.exists(config_path):
            try:
                logging.debug('Loading config from {}'.format(config_path))
                config = read_config(config_path)
            except:
                logging.exception(
                    'Failed to load config from {}'.format(config_path))
            else:
                logging.info(
                    'Successfully loaded config from {}'.format(config_path))
                break
    # After this loop, the config_path global should contain
    # the path of the config file that loaded successfully

    if config is None:
        sys.exit('Failed to load any config files!')

    # Initialize output
    try:
        output.init(config['output'])
        o = output.screen
    except:
        logging.exception('Failed to initialize the output object')
        logging.exception(traceback.format_exc())
        sys.exit(2)

    # Initialize input
    try:
        # Now we can show errors on the display
        input.init(config['input'])
        i = input.listener
    except:
        logging.exception('Failed to initialize the input object')
        logging.exception(traceback.format_exc())
        Printer(['Oops. :(', 'y u make mistake'], None, o, 0)
        sys.exit(3)

    if hasattr(o, "set_backlight_callback"):
        o.set_backlight_callback(i)

    return i, o
Example #13
0
def init():
    """ This function is called by main.py to read the input configuration, pick the corresponding driver and initialize InputListener.
 
    It also sets ``driver`` and ``listener`` globals of ``input`` module with driver and listener respectively, as well as registers ``listener.stop()`` function to be called when script exits since it's in a blocking non-daemon thread."""
    global listener, driver
    config = read_config("config.json")
    input_config = config["input"][0]
    driver_name = input_config["driver"]
    driver_module = importlib.import_module("input.drivers."+driver_name)
    args = input_config["args"] if "args" in input_config else []
    kwargs = input_config["kwargs"] if "kwargs" in input_config else {}
    driver = driver_module.InputDevice(*args, **kwargs)
    listener = InputListener(driver)
    atexit.register(listener.atexit)
Example #14
0
def init():
    """ This function is called by main.py to read the input configuration, pick the corresponding driver and initialize InputListener.
 
    It also sets ``driver`` and ``listener`` globals of ``input`` module with driver and listener respectively, as well as registers ``listener.stop()`` function to be called when script exits since it's in a blocking non-daemon thread."""
    global listener, driver
    config = read_config("config.json")
    input_config = config["input"][0]
    driver_name = input_config["driver"]
    driver_module = importlib.import_module("input.drivers." + driver_name)
    args = input_config["args"] if "args" in input_config else []
    kwargs = input_config["kwargs"] if "kwargs" in input_config else {}
    driver = driver_module.InputDevice(*args, **kwargs)
    listener = InputListener(driver)
    atexit.register(listener.atexit)
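# The input-side init() mirrors the output one: config["input"][0] names a driver
# module under input.drivers plus optional "args"/"kwargs", and the listener's
# cleanup is registered via atexit because it lives in a blocking non-daemon
# thread. A hypothetical config fragment; the driver name and kwargs are made up:
example_input_config = {
    "input": [
        {"driver": "example_keypad", "kwargs": {"address": 32}}
    ]
}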
Example #15
0
def gotConnection(conn, authentication, body):
    config = process_config(read_config(settings.gitosis_config))

    yield conn.start(authentication)
    chan = yield conn.channel(1)
    yield chan.channel_open()

    msg = Content(body)
    msg["delivery mode"] = 2
    chan.basic_publish(exchange=config['exchange'], content=msg)
    
    yield chan.channel_close()

    chan0 = yield conn.channel(0)
    yield chan0.connection_close()
    
    reactor.stop()
Example #16
0
def main():
    config = helpers.read_config()
    elogger = logger.get_logger()

    # initialize arrays for short-term and long-term traffic features
    speed_array = 'speeds'
    time_array = 'times'
    short_ttf = [[
        collections.defaultdict(lambda: {
            speed_array: [],
            time_array: []
        }) for _ in range(256)
    ] for _ in range(256)]
    long_ttf = [[
        collections.defaultdict(lambda: {
            speed_array: [],
            time_array: []
        }) for _ in range(256)
    ] for _ in range(256)]

    for data_file in config['data']:
        elogger.info(
            'Generating G and T paths and extracting traffic features on {} ...'
            .format(data_file))

        data = helpers.read_data(data_file)

        define_travel_grid_path(data, config['coords'], short_ttf, long_ttf,
                                args.grid_size)

        elogger.info(
            'Saving extended with G and T paths data in {}{}.\n'.format(
                args.data_destination_folder, data_file))
        helpers.save_processed_data(data, args.data_destination_folder,
                                    data_file)

    elogger.info('Aggregate historical traffic features ...')
    helpers.aggregate_historical_data(short_ttf, long_ttf)
    elogger.info('Saving extracted traffic features in {}'.format(
        args.ttf_destination_folder))
    helpers.save_extracted_traffic_features(short_ttf, long_ttf,
                                            args.ttf_destination_folder)
Example #17
0
def load_config():
    config = None
    # Load config
    for config_path in config_paths:
        #Only try to load the config file if it's present
        #(unclutters the logs)
        if os.path.exists(config_path):
            try:
                logging.debug('Loading config from {}'.format(config_path))
                config = read_config(config_path)
            except:
                logging.exception('Failed to load config from {}'.format(config_path))
                config_path = None
            else:
                logging.info('Successfully loaded config from {}'.format(config_path))
                break
    # After this loop, the config_path global should contain
    # the path of the config file that loaded successfully

    return config, config_path
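# A hedged usage sketch for load_config: callers presumably bail out when nothing
# loaded and otherwise keep the winning path around for later saves (the
# surrounding application code is not shown in this excerpt):
import sys

config, config_path = load_config()
if config is None:
    sys.exit('Failed to load any config files!')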
Example #18
0
def init():
    """Initialize input and output objects"""

    config = None

    # Load config
    for path in config_paths:
        try:
            logging.debug('Loading config from {}'.format(path))
            config = read_config(path)
        except:
            logging.exception('Failed to load config from {}'.format(path))
        else:
            logging.info('Successfully loaded config from {}'.format(path))
            break

    if config is None:
        sys.exit('Failed to load any config file')

    # Initialize output
    try:
        output.init(config['output'])
        o = output.screen
    except:
        logging.exception('Failed to initialize the output object')
        logging.exception(traceback.format_exc())
        sys.exit(2)

    # Initialize input
    try:
        # Now we can show errors on the display
        input.init(config['input'])
        i = input.listener
    except:
        logging.exception('Failed to initialize the input object')
        logging.exception(traceback.format_exc())
        Printer(['Oops. :(', 'y u make mistake'], None, o, 0)
        sys.exit(3)

    return i, o
Example #19
0
    yield chan.channel_open()

    msg = Content(body)
    msg["delivery mode"] = 2
    chan.basic_publish(exchange=config['exchange'], content=msg)
    
    yield chan.channel_close()

    chan0 = yield conn.channel(0)
    yield chan0.connection_close()
    
    reactor.stop()
    
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 3:
        print "%s path_to_spec content" % sys.argv[0]
        sys.exit(1)

    config = process_config(read_config(settings.gitosis_config))
    spec = txamqp.spec.load(sys.argv[1])
    authentication = {"LOGIN": config['user_id'], "PASSWORD": config['password']}

    delegate = TwistedDelegate()
    d = ClientCreator(reactor, AMQClient, delegate=delegate, vhost="/",
        spec=spec).connectTCP(config['host'], config['port'])

    d.addCallback(gotConnection, authentication, sys.argv[2])

    reactor.run()
Example #20
0
#-*- coding:utf8 -*-
import sys
import os
import tkMessageBox

from selenium import webdriver
from pyvirtualdisplay import Display
from helpers import read_config
from test_cases import *
from browser import WebBrowser
from letters import DraftManager, SendingManager


if __name__ == "__main__":
    # reading from config
    config_dict = read_config()
    GUI = int(config_dict["GUI"][0])
    BROWSER = config_dict["BROWSER"][0].lower()
    CHROMEDRIVER_PATH = config_dict["CHROMEDRIVER_PATH"][0]
    MAIL_URL = config_dict["MAIL_URL"][0]
    LOGIN = config_dict["LOGIN"][0]
    PASSWORD = config_dict["PASSWORD"][0]
    ATTACHMENTS = config_dict["ATTACHMENTS"]
    DESTINATION = config_dict["DESTINATION"][0]
    INLINES = config_dict["INLINES"]
    CC = config_dict["CC"]
    BCC = config_dict["BCC"]
    REPLY_TO = config_dict["REPLY_TO"]
    SYSTEM_TAGS = ["inbox", "starred", "sent", "drafts", "trash", "all",
        "spam"]
    TEST_TAGS = ["tag_test 1", "tag_test 2", "tag_test 3", "tag_test4"]
Example #21
0
 def get_active_user(self):
     return helpers.read_config().get(Constants.CONFIG_SECTION_GLOBAL,
                                      Constants.CONFIG_OPTION_ACTIVE_USER)
Example #22
0
 def get_user_password(self, user, mode):
     return helpers.read_config().get(
         Constants.CONFIG_USERNAME_PREFIX + user,
         Constants.CONFIG_OPTION_PASSWORD_PREFIX + str(mode))
Example #23
0
def main():
    """
        Main program loop
        * Read configuration file and validate
        * Authenticate if required
        * Export data from Trakt.tv
        * Cleanup list from Trakt.tv
        * Write to CSV
        """
    ## Parse inputs if any
    parser = argparse.ArgumentParser(description=desc, epilog=epilog)
    list_group = parser.add_mutually_exclusive_group(required=True)
    parser.add_argument('-v', action='version', version='%(prog)s 0.3')
    parser.add_argument(
        '-c',
        '--config',
        help='allow to overwrite default config filename, default %(default)s',
        action='store',
        type=str,
        dest='config',
        default='config.ini')
    parser.add_argument(
        '-o',
        '--output',
        help='allow to overwrite default output filename, default %(default)s',
        nargs='?',
        type=str,
        const='export.csv',
        default=None)
    parser.add_argument('-t',
                        '--type',
                        help='allow to overwrite type, default %(default)s',
                        choices=['movies', 'shows', 'episodes'],
                        dest='type',
                        default='movies')
    list_group.add_argument(
        '-l',
        '--list',
        help='allow to overwrite default list, default %(default)s',
        choices=['watchlist', 'collection', 'history'],
        dest='list',
        default='history')
    list_group.add_argument(
        '-u',
        '--userlist',
        help='allow to export a user custom list, default %(default)s',
        dest='userlist',
        default=False,
        action='store_true')
    parser.add_argument('-C',
                        '--clean',
                        help='empty list after export, default %(default)s',
                        default=False,
                        action='store_true',
                        dest='clean')
    parser.add_argument(
        '-D',
        '--duplicate',
        help='remove duplicate from list after export, default %(default)s',
        default=False,
        action='store_true',
        dest='dup')
    #parser.add_argument('-d', '--dryrun',
    #              help='do not update the account, default %(default)s',
    #              default=True, action='store_true', dest='dryrun')
    parser.add_argument(
        '-V',
        '--verbose',
        help='print additional verbose information, default %(default)s',
        default=False,
        action='store_true',
        dest='verbose')
    options = parser.parse_args()

    ## Display debug information
    if options.verbose:
        print("Options: %s" % options)

    if options.type == 'episodes' and options.list == "collection":
        print(
            "Error, you can only fetch {0} from the history or watchlist list".
            format(options.type))
        sys.exit(1)

    if options.userlist:
        options.list = "user list"

    if not options.output:
        options.output = 'export_{type}_{list}.csv'.format(type=options.type,
                                                           list=options.list)

    ## Read configuration and validate
    helpers.read_config(_trakt, options)

    ## Try refreshing to get new access token. If it doesn't work, user needs to authenticate again.
    helpers.api_auth_refresh(_trakt, _headers, options)

    ## Display debug information
    if options.verbose:
        print("trakt: {}".format(_trakt))
        print("Authorization header: {}".format(_headers['Authorization']))

    export_data = []
    ## Get Trakt user lists (custom lists)
    if options.userlist:
        export_data = helpers.api_get_userlists(_trakt, _headers, _proxyDict,
                                                options, 1)
        #print("export data")
        #print(export_data)
        if export_data:
            print("Found {0} user list(s)".format(len(export_data)))
            print("")
            #pp.pprint(export_data)
            # TODO: add export all user lists functionality
            print("id       | name")
            for data in export_data:
                print("{id} | {name}".format(name=data['name'],
                                             id=data['ids']['trakt']))
                #print("{id} | {name} | {items}".format(
                #       name=data['name'], id=data['ids']['trakt'], items=data['item_count'], own=data['user']['username']))
            print("")
            print(
                "Type in the id matching with the name of the list you want to export, or 'all' for all lists."
            )
            options.listid = str(input('Input: '))
            if options.listid == "all":
                for data in export_data:
                    options.listid = data['ids']['trakt']
                    options.list = "{username}'s user list with id: {id}, name: '{name}'".format(
                        username=data['user']['username'],
                        id=data['ids']['trakt'],
                        name=data['name'])
                    global response_arr  ## Cleanup global....
                    response_arr = []
                    export_data = helpers.api_get_userlist_items(
                        _trakt, _headers, _proxyDict, options, 1)
                    options.output = data['name'] + ".csv"
                    process_export_data(options, export_data)
            else:
                response_arr = []
                user_list = helpers.api_get_userlist(_trakt, _headers,
                                                     _proxyDict, options, 1)[0]
                # print(user_list)
                options.list = "{username}'s user list with id: {id}, name: '{name}'".format(
                    username=user_list['user']['username'],
                    id=user_list['ids']['trakt'],
                    name=user_list['name'])
                export_data = helpers.api_get_userlist_items(
                    _trakt, _headers, _proxyDict, options, 1)
                #pp.pprint(export_data)
                process_export_data(options, export_data)
        else:
            print("Error, no user lists found".format(type=options.type,
                                                      list=options.userlist))
            sys.exit(1)
    else:
        export_data = helpers.api_get_list(_trakt, _headers, _proxyDict,
                                           options, 1)
        if export_data:
            process_export_data(options, export_data)
        else:
            print("Error, no item(s) found for {type} from {list}".format(
                type=options.type, list=options.list))
            sys.exit(1)
Example #24
0
def q_learning(args):
    Q = {}
    train_scores = []
    eval_scores = []
    no_wins = 0
    train_ep = 0
    wins = []
    # for each episode ...
    # for train_ep in range(1, args.train_episodes + 1):
    while train_ep <= args.train_episodes:

        # ... get the initial state,
        score = 0
        start_room, start_pos, State.ROOMS, State.PORTALS = read_config(args)
        state = State(start_room, args.vision_radius)
        state.update_position(start_pos[0], start_pos[1])

        used_portal = False

        # display current state and sleep
        if args.verbose:
            display_state(state)
            sleep(args.sleep)

        # while current state is not terminal
        while not is_final_state(state, score):

            # choose one of the legal actions
            actions = get_legal_actions(state)
            action = choose_action(Q, state, actions, args)
            save_state_old = state.save_state()

            # apply action and get the next state and the reward
            reward, msg, used_portal = apply_action(state, action, used_portal)
            score += reward

            # Q-Learning
            save_state = state.save_state()
            max_val = -9999
            for a_new in get_all_actions():
                if (save_state,
                        a_new) in Q and Q[(save_state, a_new)] > max_val:
                    max_val = Q[(save_state, a_new)]

            if max_val == -9999:
                max_val = 0

            if (save_state_old, action) in Q:
                Q[(save_state_old, action)] += args.learning_rate * (reward + \
                            args.discount * max_val - Q[save_state_old, action])
            else:
                Q[(save_state_old, action)] = args.learning_rate * (reward + \
                                            args.discount * max_val)

            # display current state and sleep
            if args.verbose:
                print(msg)
                display_state(state)
                sleep(args.sleep)

            if "Gigel GOT OUT!" in msg:
                no_wins += 1

        # print("Episode %6d / %6d" % (train_ep, args.train_episodes))
        train_scores.append(score)
        wins.append(no_wins)

        # evaluate the greedy policy
        if train_ep % args.eval_every == 0:
            avg_score = .0

            for s in train_scores[-args.eval_every:]:
                avg_score += s

            eval_scores.append(avg_score / args.eval_every)

        train_ep += 1

    print "Gigel had " + str(no_wins) + " wins"

    # --------------------------------------------------------------------------
    if args.final_show:
        final_score = 0
        start_room, start_pos, State.ROOMS, State.PORTALS = read_config(args)
        state = State(start_room, args.vision_radius)
        state.update_position(start_pos[0], start_pos[1])

        used_portal = False

        while not is_final_state(state, final_score):
            action = best_action(Q, state, get_legal_actions(state))
            reward, msg, used_portal = apply_action(state, action, used_portal)
            final_score += reward
            print(msg)
            display_state(state)
            sleep(args.sleep)

    if args.plot_scores:
        from matplotlib import pyplot as plt
        import numpy as np
        plt.xlabel("Episode")
        plt.ylabel("Average Score")
        # plt.plot(
        #     np.linspace(1, args.train_episodes, args.train_episodes),
        #     np.convolve(train_scores, [0.2,0.2,0.2,0.2,0.2], "same"),
        #     linewidth = 1.0, color = "blue"
        # )
        plt.plot(np.linspace(args.eval_every, train_ep, len(eval_scores)),
                 eval_scores,
                 linewidth=2.0,
                 color="red")
        plt.show()

    return train_scores, wins
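# The update inside q_learning is the standard tabular rule
#   Q(s, a) <- Q(s, a) + lr * (reward + discount * max_a' Q(s', a') - Q(s, a)).
# A hedged, compact variant using a defaultdict: unseen (state, action) pairs count
# as 0.0, whereas the loop above maximizes only over entries already stored in Q.
from collections import defaultdict

def q_update(Q, state, action, reward, next_state, all_actions, lr, discount):
    # Q is expected to be a defaultdict(float) keyed by (state, action) tuples.
    best_next = max((Q[(next_state, a)] for a in all_actions), default=0.0)
    Q[(state, action)] += lr * (reward + discount * best_next - Q[(state, action)])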
Example #25
0
config_filename = "config.json"
default_config = '{"card":0, "channel":"PCM", "adjust_amount":1, "adjust_type":"dB"}'

i = None
o = None
callback = None

from helpers import read_config, write_config

import os, sys
current_module_path = os.path.dirname(sys.modules[__name__].__file__)

config_path = os.path.join(current_module_path, config_filename)

try:
    config = read_config(config_path)
except (ValueError, IOError):
    print("Volume app: missing/broken config, restoring with defaults...")
    with open(config_path, "w") as f:
        f.write(default_config)
    config = read_config(config_path)

from subprocess import call, check_output

from ui import Menu, IntegerAdjustInput, Listbox, ellipsize


#amixer commands
def amixer_command(command):
    return call(['amixer'] + list(command))
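# A hedged usage sketch for amixer_command: arguments are passed straight through
# to the amixer CLI, so adjusting the channel from default_config by its default
# step might look like this:
amixer_command(["set", "PCM", "1dB+"])     # raise volume by 1 dB
amixer_command(["set", "PCM", "toggle"])   # mute/unmute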
Example #26
0
 def get_active_mode(self):
     return PasswordTypes(
         int(helpers.read_config().get(
             Constants.CONFIG_SECTION_GLOBAL,
             Constants.CONFIG_OPTION_ACTIVE_MODE)))
Example #27
0
signal.signal(signal.SIGUSR1, dumpthreads)

#Getting pyLCI config, it will be passed to input and output initializers
#If config at main_config_path exists, use that
#If not, use the backup path
#Also, log the errors to a file so that it can be debugged later

from helpers import read_config

is_emulator = emulator_flag_filename in os.listdir(".")

if not is_emulator:
    try:
        error_file = open(config_error_file, "w+")
        config = read_config(main_config_path)
    except Exception as e:
        print(repr(e))
        print("------------------------------")
        print("Couldn't read main config, using backup config!")
        error_file.write("Couldn't read main config: {}\n".format(repr(e)))
        error_file.write("Using backup config!\n")
        try:
            config = read_config(backup_config_path)
        except Exception as e:
            print("Couldn't read backup config, exiting!")
            error_file.write("Couldn't read backup config: {}\n".format(
                repr(e)))
            error_file.write("Exiting!\n")
            error_file.close()
            sys.exit(1)
Example #28
0
 def __init__(self, config=None):
     self.hostapd_conf = config if config else self.hostapd_conf
     self.config = read_config(self.hostapd_conf)
     self.mode = self.getMode()
Example #29
0
o = None

from time import sleep

from helpers import read_config, write_config
from ui import Menu, Printer, Checkbox, MenuExitException

import systemctl

import os,sys
current_module_path = os.path.dirname(sys.modules[__name__].__file__)

config_path = os.path.join(current_module_path, config_filename)

try:
    config = read_config(config_path)
except ValueError:
    print("Systemctl app: broken config, restoring with defaults...")
    with open(config_path, "w") as f:
        f.write(default_config)
    config = read_config(config_path)

def change_filters():
    global config
    all_types = [
    ["Slices", 'slice'],
    ["Sockets", 'socket'],
    ["Services", 'service'],
    ["Automounts", 'automount'],
    ["Mounts", 'mount'],
    ["Timers", 'timer'],
def main():
    """
        Main program loop
        * Read configuration file and validate
        * Read CSV file
        * Authenticate if required
        * Cleanup list from Trakt.tv
        * Inject data into Trakt.tv
        """
    # Parse inputs if any
    parser = argparse.ArgumentParser(description=desc, epilog=epilog)
    list_group = parser.add_mutually_exclusive_group(required=True)
    time_group = parser.add_mutually_exclusive_group(required=False)
    parser.add_argument('-v', action='version', version='%(prog)s 0.1')
    parser.add_argument(
        '-c',
        '--config',
        help='allow to overwrite default config filename, default %(default)s',
        action='store',
        type=str,
        dest='config',
        default='config.ini')
    parser.add_argument('-i',
                        '--input',
                        help='CSV file to import, default %(default)s',
                        nargs='?',
                        type=argparse.FileType('r'),
                        default=None,
                        required=True)
    time_group.add_argument(
        '-w',
        '--watched_at',
        help=
        'import watched_at date from CSV, the format must be UTC datetime. NOTE: Only works with history, not with watchlist/userlist. default %(default)s',
        default=False,
        action='store_true',
        dest='watched_at')
    now = datetime.now()
    time_group.add_argument(
        '-s',
        '--seen',
        help=
        'use custom time for watched_at if importing to history, default %(default)s. NOTE: Only works with history, not with watchlist/userlist. Use specific time if provided, default is current time.',
        nargs='?',
        const=now.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
        action='store',
        type=str,
        dest='seen',
        default=False)
    parser.add_argument(
        '-f',
        '--format',
        help='allow to overwrite default ID type format, default %(default)s',
        choices=['imdb', 'tmdb', 'tvdb', 'tvrage', 'trakt'],
        dest='format',
        default='trakt')
    parser.add_argument('-t',
                        '--type',
                        help='allow to overwrite type, default %(default)s',
                        choices=['movies', 'shows', 'episodes'],
                        dest='type',
                        default='movies')
    list_group.add_argument(
        '-l',
        '--list',
        help='allow to overwrite default list, default %(default)s',
        choices=['watchlist', 'collection', 'history'],
        dest='list',
        default='watchlist')
    list_group.add_argument(
        '-u',
        '--userlist',
        help='allow to add item(s) to a user custom list, default %(default)s',
        dest='userlist',
        default=False,
        action='store_true')
    parser.add_argument('-C',
                        '--clean',
                        help='empty list prior to import, default %(default)s',
                        default=False,
                        action='store_true',
                        dest='clean')
    #parser.add_argument('-d', '--dryrun',
    #              help='do not update the account, default %(default)s',
    #              default=True, action='store_true', dest='dryrun')
    parser.add_argument(
        '-V',
        '--verbose',
        help='print additional verbose information, default %(default)s',
        default=False,
        action='store_true',
        dest='verbose')
    options = parser.parse_args()

    # Display debug information
    if options.verbose:
        print("Options: %s" % options)

    if options.seen and options.list != "history":
        print(
            "Error, you can only mark seen {0} when adding into the history list"
            .format(options.type))
        sys.exit(1)

    if options.seen:
        try:
            datetime.strptime(options.seen, '%Y-%m-%dT%H:%M:%S.000Z')
        except:
            sys.exit(
                "Error, invalid format, it's must be UTC datetime, eg: '2016-01-01T00:00:00.000Z'"
            )

    ## Read configuration and validate
    helpers.read_config(_trakt, options)

    ## Try refreshing to get new access token. If it doesn't work, user needs to authenticate again.
    helpers.api_auth_refresh(_trakt, _headers, options)

    # Display debug information
    if options.verbose:
        print("API Trakt: {}".format(_trakt))
        print("Authorization header: {}".format(_headers['Authorization']))

    # Handle userlist
    if options.userlist:
        print(_trakt['access_token'])
        export_data = helpers.api_get_userlists(_trakt, _headers, _proxyDict,
                                                options, 1)
        if export_data:
            print("")
            print("Found {0} user list(s)".format(len(export_data)))
            print("")
            #pp.pprint(export_data)
            print("id       | name")
            for data in export_data:
                print("{id} | {name}".format(name=data['name'],
                                             id=data['ids']['trakt']))
                #print("{id} | {name} | {items}".format(
                #       name=data['name'], id=data['ids']['trakt'], items=data['item_count'], own=data['user']['username']))
            print("")
            print(
                "Type in the id matching with the name of the list you want to import item(s) to."
            )
            options.listid = str(input('Input: '))
            print(
                "Importing to {username}'s user list with id: {id}, name: '{name}'"
                .format(username=data['user']['username'],
                        id=data['ids']['trakt'],
                        name=data['name']))
            response_arr = []
        else:
            print("Error, no user lists found")
            sys.exit(1)
    # else:
    #     export_data = helpers.api_get_list(_trakt, _headers, _proxyDict, options, 1)
    #     if not export_data:
    #         print("Error, no item(s) found for {type} from {list}".format(
    #             type=options.type, list=options.list))
    #         sys.exit(1)

    # Empty list prior to import
    if options.clean:
        cleanup_list(options)

    # Read CSV list of IDs
    read_ids = read_csv(options)

    # if IDs make the list into trakt format
    data = []
    results = {'sentids': 0, 'added': 0, 'existing': 0, 'not_found': 0}

    if options.list == 'history':
        options.time_key = 'watched_at'
    elif options.list == 'watchlist':
        options.time_key = 'listed_at'
    elif options.list == 'collection':
        options.time_key = 'collected_at'
    elif options.userlist != None:
        options.time_key = 'listed_at'

    if read_ids:
        print("Found {0} items to import".format(len(read_ids)))

        for myid in read_ids:
            # If id (row) exists and is not blank (has a format)
            if myid and myid[options.format]:
                # Record time format in csv we're importing from.
                # NOTE: Trakt API does not allow for custom times for listed_at and collected_at.
                # Therefore, options.time_key doesn't do anything for lists other than history.
                # However, this allows for any type of list to be imported in to default lists.
                if 'watched_at' in myid:
                    options.csv_time = 'watched_at'
                elif 'listed_at' in myid:
                    options.csv_time = 'listed_at'
                elif 'collected_at' in myid:
                    options.csv_time = 'collected_at'
                else:
                    options.csv_time = 'listed_at'
                if options.verbose:
                    pp.pprint(myid)
                row_title = "No title in csv"
                if options.seen:
                    row_time = options.seen
                elif options.watched_at:
                    row_time = myid[options.csv_time]
                else:
                    row_time = "No time option"

                # If format is not "imdb" it must be cast to an integer
                if not options.format == "imdb" and not myid[
                        options.format].startswith('tt'):
                    myid[options.format] = int(myid[options.format])
                if 'title' in myid:
                    row_title = "title: " + myid['title']
                if 'show_title' in myid and 'episode_title' in myid:
                    row_title = "title: " + myid[
                        'show_title'] + ", episode_title: " + myid[
                            'episode_title']

                if (options.type == "movies" or options.type == "shows"
                        or options.type == "episodes"):
                    data.append({
                        'ids': {
                            options.format: myid[options.format]
                        },
                        options.time_key: row_time
                    })
                else:
                    data.append(
                        {'ids': {
                            options.format: myid[options.format]
                        }})
                print(
                    "Importing record, {title}, id: {id}, {csv_time}: {time}".
                    format(title=row_title,
                           id=myid[options.format],
                           csv_time=options.csv_time,
                           time=row_time))
                # Import batch of 10 IDs
                if len(data) >= 10:
                    #pp.pprint(json.dumps(data))
                    results['sentids'] += len(data)
                    result = api_add_to_list(options, data)
                    if result:
                        # print("Result: {0}".format(result))
                        if 'added' in result and result['added']:
                            results['added'] += result['added'][options.type]
                        if 'existing' in result and result['existing']:
                            results['existing'] += result['existing'][
                                options.type]
                        if 'not_found' in result and result['not_found']:
                            results['not_found'] += len(
                                result['not_found'][options.type])
                    data = []
        # Import the rest
        if len(data) > 0:
            #pp.pprint(data)
            results['sentids'] += len(data)
            result = api_add_to_list(options, data)
            if result:
                # pp.pprint("Result: {0}".format(result))
                if 'added' in result and result['added']:
                    results['added'] += result['added'][options.type]
                if 'existing' in result and result['existing']:
                    results['existing'] += result['existing'][options.type]
                if 'not_found' in result and result['not_found']:
                    results['not_found'] += len(
                        result['not_found'][options.type])
    else:
        # TODO: Read STDIN to ID
        print("No items found, nothing to do.")
        sys.exit(0)

    print(
        "Overall imported {sent} {type}, results added:{added}, existing:{existing}, not_found:{not_found}"
        .format(sent=results['sentids'],
                type=options.type,
                added=results['added'],
                existing=results['existing'],
                not_found=results['not_found']))
Example #31
0
def analysis_main(args,
                  runnormal,
                  runtumor,
                  output,
                  normalname,
                  normalfastqs,
                  tumorname,
                  tumorfastqs,
                  ivauser=False,
                  igvuser=False,
                  hg38ref=False,
                  starttype=False):
    try:
        ################################################################
        # Write InputArgs to logfile
        config = read_wrapperconf()
        commandlogs = config["commandlogs"]
        #if not os.path.exists(commandlogs):
        #    os.makedirs(commandlogs)
        command = f"{sys.argv[0]}"
        current_date = time.strftime("%Y-%m-%d")
        commandlog = f"{commandlogs}/commands_{current_date}.log"
        for arg in vars(args):
            command = f"{command} --{arg} {getattr(args, arg)}"
        commandlogfile = open(commandlog, "a+")
        commandlogfile.write(f"{get_time()}" + "\n")
        commandlogfile.write(command + "\n")
        ################################################################

        if output.endswith("/"):
            output = output[:-1]
        if normalfastqs.endswith("/"):
            normalfastqs = normalfastqs[:-1]
        if tumorfastqs.endswith("/"):
            tumorfastqs = tumorfastqs[:-1]

        #################################################################
        # Validate Inputs
        ################################################################
        error_list = []

        if hg38ref:
            logger(f"hg38 argument given with value: {hg38ref}")
            if hg38ref != "yes":
                logger(
                    "argument is not yes, if you want hg19 simply dont provide hg38 argument, exiting"
                )
                error_list.append(f"invalid hg38 argument value: {hg38ref}")

        if hg38ref == "yes":
            mainconf = "hg38conf"
        else:
            mainconf = "hg19conf"
        configdir = config["configdir"]
        mainconf_name = config[mainconf]
        mainconf_path = f"{configdir}/{mainconf_name}"

        # validate fastqdirs
        if starttype == "force":
            f_tumorfastqs = ""
            f_normalfastqs = ""
        else:
            if not os.path.isdir(normalfastqs):
                error_list.append(
                    f"{normalfastqs} does not appear to be a directory")
            else:
                f_normalfastqs = glob.glob(f"{normalfastqs}/*fastq.gz")
                if not f_normalfastqs:
                    logger(f"Warning: No fastqs found in normaldir")
                    f_normalfastqs = glob.glob(f"{normalfastqs}/*fasterq")
                    if not f_normalfastqs:
                        error_list.append(
                            f"No fastqs or fasterqs found in normaldir")

            if not os.path.isdir(tumorfastqs):
                error_list.append(
                    f"{tumorfastqs} does not appear to be a directory")
            else:
                f_tumorfastqs = glob.glob(f"{tumorfastqs}/*fastq.gz")
                if not f_tumorfastqs:
                    logger(f"Warning: No fastqs found in tumordir")
                    f_tumorfastqs = glob.glob(f"{tumorfastqs}/*fasterq")
                    if not f_tumorfastqs:
                        error_list.append(
                            f"No fastqs or fasterqs found in tumordir")
        # validate iva and igv users if supplied
        if igvuser:
            mainconf = helpers.read_config(mainconf_path)
            igvdatadir = mainconf["rules"]["share_to_igv"]["igvdatadir"]
            if not os.path.isdir(f"{igvdatadir}/{igvuser}"):
                error_list.append(
                    f"{igvuser} does not appear to be a valid preconfigured IGV user"
                )
        if ivauser:
            ivaconf = read_ivaconf()
            if ivauser not in ivaconf["ivausers"]:
                error_list.append(
                    f"{ivauser} is not a valid preconfigured IVA user")

        # prepare outputdirectory
        if not os.path.isdir(output):
            try:
                os.mkdir(output)
            except Exception as e:
                error_list.append(
                    f"outputdirectory: {output} does not exist and could not be created"
                )

        if error_list:
            logger("Errors found in arguments to script:")
            for arg in vars(args):
                logger(f"{arg} = {getattr(args, arg)}")
            for error in error_list:
                logger(error)
            logger("Exiting")
            sys.exit()

        #################################################################
        # Prepare AnalysisFolder
        #################################################################
        date, _, _, chip, *_ = runnormal.split('_')
        normalid = '_'.join([normalname, date, chip])
        date, _, _, chip, *_ = runtumor.split('_')
        tumorid = '_'.join([tumorname, date, chip])

        samplelogs = f"{output}/logs"
        if not os.path.isdir(samplelogs):
            os.mkdir(samplelogs)
        runconfigs = f"{output}/configs"
        if not os.path.isdir(runconfigs):
            os.mkdir(runconfigs)

        # copying configfiles to analysisdir
        clusterconf = config["clusterconf"]
        copyfile(f"{configdir}/{clusterconf}", f"{runconfigs}/{clusterconf}")
        copyfile(f"{configdir}/{mainconf_name}",
                 f"{runconfigs}/{mainconf_name}")

        samplelog = f"{samplelogs}/{tumorid}.log"
        logger("Input validated:", samplelog)
        logger(f"{command}", samplelog)
        logger("Fastqs found for normal:", samplelog)
        logger(f"{f_normalfastqs}", samplelog)
        logger("Fastqs found for tumor:", samplelog)
        logger(f"{f_tumorfastqs}", samplelog)

        ##################################################################
        # Create AnalysisConfigfile
        ##################################################################
        analysisdict = {}
        analysisdict["normalname"] = normalname
        analysisdict["normalid"] = normalid
        analysisdict["normalfastqs"] = [normalfastqs]
        analysisdict["tumorname"] = tumorname
        analysisdict["tumorid"] = tumorid
        analysisdict["tumorfastqs"] = [tumorfastqs]
        analysisdict["igvuser"] = igvuser
        analysisdict["ivauser"] = ivauser
        analysisdict["workingdir"] = output

        if hg38ref == "yes":
            analysisdict["reference"] = "hg38"
        else:
            analysisdict["reference"] = "hg19"

        with open(f"{runconfigs}/{tumorid}_config.json", 'w') as analysisconf:
            json.dump(analysisdict, analysisconf, ensure_ascii=False, indent=4)

        ###################################################################
        # Prepare Singularity Binddirs
        binddirs = config["singularitybinddirs"]
        binddir_string = ""
        for binddir in binddirs:
            source = binddirs[binddir]["source"]
            if not analysisdict["reference"] in source:
                if not "petagene" in source:
                    #print("Wrong reference")
                    #print(f"analysisdict_reference: {analysisdict['reference']}")
                    #print(f"source: {source}")
                    #elif not "petagene" in source:
                    # print("not petegene")
                    #print(f"source: {source}")
                    continue
            destination = binddirs[binddir]["destination"]
            logger(
                f"preparing binddir variable {binddir} source: {source} destination: {destination}"
            )
            binddir_string = f"{binddir_string}{source}:{destination},"
            for normalfastqdir in analysisdict["normalfastqs"]:
                binddir_string = f"{binddir_string}{normalfastqdir},"
            for tumorfastqdir in analysisdict["tumorfastqs"]:
                binddir_string = f"{binddir_string}{tumorfastqdir},"
        binddir_string = f"{binddir_string}{output}"
        print(binddir_string)

        ###################################################################
        # Start SnakeMake pipeline
        ###################################################################
        scriptdir = os.path.dirname(
            os.path.realpath(__file__))  # find current dir

        snakemake_path = config["snakemake_env"]
        os.environ["PATH"] += os.pathsep + snakemake_path
        my_env = os.environ.copy()
        snakemake_args = f"snakemake -s pipeline.snakefile --configfile {runconfigs}/{tumorid}_config.json --dag | dot -Tsvg > {samplelogs}/dag_{current_date}.svg"
        # >>>>>>>>>>>> Create Dag of pipeline
        subprocess.run(snakemake_args, shell=True, env=my_env)  # CREATE DAG

        snakemake_args = f"snakemake -s pipeline.snakefile --configfile {runconfigs}/{tumorid}_config.json --use-singularity --singularity-args '-e --bind {binddir_string}' --cluster-config configs/cluster.yaml --cluster \"qsub -S /bin/bash -pe mpi {{cluster.threads}} -q {{cluster.queue}} -N {{cluster.name}} -o {samplelogs}/{{cluster.output}} -e {samplelogs}/{{cluster.error}} -l {{cluster.excl}}\" --jobs 999 --latency-wait 60 --directory {scriptdir} &>> {samplelog}"
        # >>>>>>>>>>>> Start pipeline
        subprocess.run(snakemake_args, shell=True,
                       env=my_env)  # Shellscript pipeline

    except Exception as e:
        tb = traceback.format_exc()
        logger(f"Error in script:")
        logger(f"{e} Traceback: {tb}")
        sys.exit(1)

    if os.path.isfile(f"{output}/reporting/workflow_finished.txt"):
        # these functions are only executed if snakemake workflow has finished successfully
        yearly_stats(args.tumorsample, args.normalsample)
        petagene_compress_bam(args.outputdir, args.tumorsample)
Example #32
0
 def check_user_exists(self, username):
     return helpers.read_config().has_section(
         Constants.CONFIG_USERNAME_PREFIX + username)
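# A hedged sketch tying the small accessor methods together: given prefixed user
# sections like those illustrated after Example #4, a caller might do something
# along these lines (the enclosing class, its instance name and the PasswordTypes
# enum are not shown in this excerpt, so this is illustrative only):
#
#   if manager.check_user_exists("alice"):
#       mode = manager.get_active_mode()
#       password = manager.get_user_password("alice", mode.value)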