Example #1
    def __init__(self,
                 teams_file="settings/team_weights.txt",
                 words_file="settings/game_words.txt",
                 full_log=None,
                 game_log=None):
        self.full_log = full_log
        self.game_log = game_log
        if self.full_log is not None:
            self.full_log.info("SpyMaster initialising...")
        if self.game_log is not None:
            self.game_log.info("SpyMaster initialising...")
        # spymaster stuff
        self.settings = load_settings(sett_file="settings/spymaster_setts.txt",
                                      default_dict={
                                          "max_top_hints": 10,
                                          "max_levels": 2,
                                          "model_name": "glove-wiki-100",
                                          "use_annoy_indexer": True,
                                          "vocab_limit": 0,
                                          "game_hint_naive_method": False
                                      })

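        # Per-level hint limits: builds keys level_1_limit .. level_<max_levels>_limit,
        # each defaulting to 0, from the same spymaster settings file.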
        self.strategy = load_settings(
            sett_file="settings/spymaster_setts.txt",
            default_dict={
                "level_{}_limit".format(str(x + 1)): 0
                for x in range(self.settings["max_levels"])
            })

        # game stuff (what game words are available depends on the word model used so it is loaded in load_model)
        self.game_words = list()

        # teams stuff
        self.team_weights = load_settings(sett_file=teams_file,
                                          default_dict={
                                              "t": 30,
                                              "b": -1,
                                              "o": -3,
                                              "k": -10
                                          })
        self.team_words = dict(
        )  # made as an attribute to save passing back and forth while running rounds

        # nlp stuff
        self.word_model = self.load_word_model(
            model_name=self.settings["model_name"], game_words_file=words_file
        )  # keyed vector model for generating hints

        self.indexer = self.load_indexer(
            model_name=self.settings["model_name"])

        self.ls = LancasterStemmer()  # stemmer for checking hint legality
        self.spacy_nlp = spacy.load(
            "en_core_web_sm")  # lemmatiser for checking hint legality

        if self.full_log is not None:
            self.full_log.info("SpyMaster initialised!")
        if self.game_log is not None:
            self.game_log.info("SpyMaster initialised!")
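Each project above ships its own load_settings helper; they are not the same function. As a rough, hypothetical sketch of the sett_file/default_dict signature used in Example #1 (the actual implementation in that project may read a different file format), such a loader could look like:

def load_settings(sett_file, default_dict=None):
    # Hypothetical sketch only: start from the supplied defaults and override them
    # with any "key: value" lines found in the settings file; a real loader would
    # also coerce each value to the type of its default (int, bool, list, ...).
    settings = dict(default_dict or {})
    try:
        with open(sett_file) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                key, _, value = line.partition(":")
                if key.strip():
                    settings[key.strip()] = value.strip()
    except FileNotFoundError:
        pass  # no settings file yet; keep the defaults
    return settings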
Example #2
def render(view_type=None, login=None, code=None):
    """ Renders the login HTML. Exits. """

    # init the settings object
    settings = utils.load_settings()

    template_file_path = "templates/login.html"
    if view_type == 'reset':
        template_file_path = 'templates/reset_password.html'

    # grab up the template
    html_raw = open(template_file_path, "r").read()
    html_tmp = Template(html_raw)

    # create the output
    output = html.meta.basic_http_header
    output += html_tmp.safe_substitute(
        version=settings.get('application', 'version'),
        title=settings.get('application', 'title'),
        api_url=api.get_api_url(),
        prod_url=settings.get('application', 'tld'),
        released=utils.get_latest_update_string(),
        login=login,
        code=code,
    )
    logger = utils.get_logger()
    # render; don't exit (in case we're metering performance)
    print(output)
Example #3
def smtp_relay_get():
    config = utils.load_settings(env)

    dkim_rrtxt = ""
    rr = config.get("SMTP_RELAY_DKIM_RR", None)
    if rr is not None:
        if rr.get("p") is None:
            raise ValueError("Key doesn't exist!")
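        # Assemble the DKIM TXT record: emit the optional tags first (falling back to the
        # defaults for v= and k=, skipping tags that resolve to None), then append the
        # required public key p= last.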
        for c, d in (("v", "DKIM1"), ("h", None), ("k", "rsa"), ("n", None),
                     ("s", None), ("t", None)):
            txt = rr.get(c, d)
            if txt is None:
                continue
            else:
                dkim_rrtxt += f"{c}={txt}; "
        dkim_rrtxt += f"p={rr.get('p')}"

    return {
        "enabled": config.get("SMTP_RELAY_ENABLED", False),
        "host": config.get("SMTP_RELAY_HOST", ""),
        "port": config.get("SMTP_RELAY_PORT", None),
        "user": config.get("SMTP_RELAY_USER", ""),
        "authorized_servers": config.get("SMTP_RELAY_AUTHORIZED_SERVERS", []),
        "dkim_selector": config.get("SMTP_RELAY_DKIM_SELECTOR", None),
        "dkim_rr": dkim_rrtxt
    }
Example #4
async def check_group_videos(vk):
    plugin.temp_data['s'] = load_settings(plugin)
    # Get the number of videos in the group
    values = {'owner_id': -int(plugin.temp_data['s']['public_id']), 'count': 0}
    resp = await vk.method('video.get', values)
    if resp:
        plugin.temp_data['public_video_count'] = resp.get('count')
Example #5
 def __init__(self):
     self.settings = utils.load_settings()
     self.event = {}
     self.read_event()
     if 'SENSU_TEST' not in os.environ:
         self.filter()
         self.handle()
Example #6
 def __init__(self):
     self.settings = utils.load_settings()
     self.event = {}
     self.read_event()
     if 'SENSU_TEST' not in os.environ:
         self.filter()
         self.handle()
Example #7
def smtp_relay_get():
    config = utils.load_settings(env)
    return {
        "enabled": config.get("SMTP_RELAY_ENABLED", True),
        "host": config.get("SMTP_RELAY_HOST", ""),
        "auth_enabled": config.get("SMTP_RELAY_AUTH", False),
        "user": config.get("SMTP_RELAY_USER", "")
    }
Example #8
def test(cases):
    utils.load_settings(args, "settings/res128.json", cases)

    def g_func():
        x = Generator(args.base_ch, 32, 10, n_projected_dims=4).cuda()
        return x

    def z_func():
        return torch.FloatTensor(
            truncnorm.rvs(-1.5, 1.5, size=(args.batch_size, 128))).cuda()

    def y_func():
        return torch.eye(10)[torch.randint(0, 10, (args.batch_size, ))].cuda()

    inceptions_score_fid_all("cifar_case" + str(cases), g_func, z_func, y_func,
                             args.use_multi_gpu, 50000 // args.batch_size + 1,
                             "cifar10_train.pkl")
Example #9
    def start(self):
        """Blocking call to delegate control to frontend."""
        self.settings = utils.load_settings()
        self.systems = utils.load_systems(self.settings)

        # todo : configure joystick if found
        pass

        self.handlers = [SystemsHandler(self.systems)]
        self.handlers[-1].render()
Example #10
def default_quota_set():
    config = utils.load_settings(env)
    try:
        config["default-quota"] = validate_quota(request.values.get('default_quota'))
        utils.write_settings(config, env)

    except ValueError as e:
        return ("ERROR: %s" % str(e), 400)

    return "OK"
Example #11
 def __init__(self):
     log.info("Operative initialising...")
     self.team_words = dict()
     self.board_words = list()
     self.hints = dict()
     self.settings = load_settings(sett_file="settings/spymaster_setts.txt",
                                   default_dict={"max_top_hints": 10,
                                                 "max_levels": 2})
     self.evaluation_methods = [self.__concept_net_eval, self.__word_net_path_eval,
                                self.__word_net_wup_eval, self.__word_net_lch_eval]
     log.info("Operative initialised!")
Example #12
 def _load_settings(self):
     """
     Load the settings file. The file contains integrity check settings,
     ignored entities configuration and other useful settings.
     """
     self.lg.debug('Loading settings file from %s', self.args.settings)
     try:
         self.settings = utils.load_settings(self.args.settings)
     except Exception as e:
         raise ManagerError('Error loading settings: %s' % e)
     self.lg.debug('Settings parsed: %s', self.settings)
Example #13
 def __init__(self, resolution_param='', iterations_param=''):
     linear_data, circular_data, hyperbolic_data, resolution = load_settings()
     self.__linear_data = linear_data
     self.__circular_data = circular_data
     self.__hyperbolic_data = hyperbolic_data
     if (resolution_param != ''):
         self.__resolution = resolution_param
     else:
         self.__resolution = resolution
     self.__iterations = iterations_param
     self.__constants_compute()
Example #14
def check_miab_version(env, output):
	config = load_settings(env)

	if config.get("privacy", True):
		output.print_warning("Mail-in-a-Box version check disabled by privacy setting.")
	else:
		this_ver = what_version_is_this(env)
		latest_ver = get_latest_miab_version()
		if this_ver == latest_ver:
			output.print_ok("Mail-in-a-Box is up to date. You are running version %s." % this_ver)
		else:
			output.print_error("A new version of Mail-in-a-Box is available. You are running version %s. The latest version is %s. For upgrade instructions, see https://mailinabox.email. "
				% (this_ver, latest_ver))
Example #15
def check_miab_version(env, output):
	config = load_settings(env)

	if config.get("privacy", True):
		output.print_warning("Mail-in-a-Box version check disabled by privacy setting.")
	else:
		this_ver = what_version_is_this(env)
		latest_ver = get_latest_miab_version()
		if this_ver == latest_ver:
			output.print_ok("Mail-in-a-Box is up to date. You are running version %s." % this_ver)
		else:
			output.print_error("A new version of Mail-in-a-Box is available. You are running version %s. The latest version is %s. For upgrade instructions, see https://mailinabox.email. "
				% (this_ver, latest_ver))
Example #16
async def setup_counter(vk):
    plugin.temp_data['s'] = load_settings(plugin)

    plugin.temp_data['start_time'] = datetime.datetime.now()

    plugin.temp_data['24h'] = datetime.datetime.now()

    plugin.temp_data['messages'] = 0
    plugin.temp_data['messages_24h'] = 0

    plugin.temp_data['commands'] = {}

    if plugin.temp_data['s']['set_status']:
        schedule_coroutine(update_counters(vk))
Example #17
    def __init__(self, screen: pygame.Surface):
        self.screen = screen

        self.options = [
            KineticButton("<",
                          15,
                          15,
                          40,
                          40,
                          "back",
                          "Calibri",
                          30,
                          background_color=(70, 100, 120),
                          hover_color=(240, 230, 50),
                          borders=2),
            KineticButton("Activé",
                          200,
                          200,
                          150,
                          30,
                          "toggle_fullscreen",
                          "Calibri",
                          30,
                          background_color=(70, 100, 120),
                          hover_color=(240, 230, 50),
                          borders=2),
            KineticButton("Appliquer",
                          20,
                          300,
                          130,
                          30,
                          "apply",
                          'Calibri',
                          30,
                          background_color=(70, 100, 120),
                          hover_color=(240, 230, 50),
                          borders=2)
        ]
        self.hover = -1

        self.config = utils.load_settings()
        self._update_button()
        self.actual_fullscreen = self.config["fullscreen"]

        self.volume_x = self.config["sound"] * 500 + 200
        self.click_volume = False
        self.offset_click = 0
Example #18
    def __init__(self, master, *args):
        """the settings 'tab' widget of the GUI"""
        super().__init__(master)
        self.speed_options = ["Fast", "Medium", "Slow"]
        self.yes_no_options = ["Yes", "No"]
        self.tooltips = {
            "Spotify Path":
            "The path to Spotify on your local computer. This is where exactly on your PC Spotify.exe is located.",
            "Pause When Locked":
            "Whether or not to stop the script from restarting Spotify while the Windows account is locked. This helps prevent Spotify restarting when using Spotify on phone and it reaches an ad.",
            "Push To Back":
            "Whether or not to push Spotify below the active window after restart.",
            "Create Shortcut":
            "Whether or not to create a desktop shortcut when the GUI launches. Setting this to yes will create a shortcut or overwrite the previous shortcut on GUI launch, so even when you delete it a new one will be generated. Set this to no, and it will no longer generate new ones.",
        }
        self.settings = load_settings()

        self.widget()
Example #19
def run_network_checks(env, output):
	# Also see setup/network-checks.sh.

	output.add_heading("Network")

	check_ufw(env, output)

	# Stop if we cannot make an outbound connection on port 25. Many residential
	# networks block outbound port 25 to prevent their network from sending spam.
	# See if we can reach one of Google's MTAs with a 5-second timeout.
	code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True)
	if ret == 0:
		output.print_ok("Outbound mail (SMTP port 25) is not blocked.")
	else:
		output.print_warning("""Outbound mail (SMTP port 25) seems to be blocked by your network. You
			will not be able to send any mail without a SMTP relay. Many residential networks block port 25 to prevent
			hijacked machines from being able to send spam. A quick connection test to Google's mail server on port 25
			failed.""")

	# Stop if the IPv4 address is listed in the ZEN Spamhaus Block List.
	# The user might have ended up on an IP address that was previously in use
	# by a spammer, or the user may be deploying on a residential network. We
	# will not be able to reliably send mail in these cases.
	rev_ip4 = ".".join(reversed(env['PUBLIC_IP'].split('.')))
	zen = query_dns(rev_ip4+'.zen.spamhaus.org', 'A', nxdomain=None)
	if zen is None:
		output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
	else:
		output.print_error("""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
			which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
			% (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))

	# Check if a SMTP relay is set up. It's not strictly required, but on some providers
	# it might be needed.
	config = load_settings(env)
	if config.get("SMTP_RELAY_ENABLED"):
		if config.get("SMTP_RELAY_AUTH"):
			output.print_ok("An authenticated SMTP relay has been set up via port 587.")
		else:
			output.print_warning("A SMTP relay has been set up, but it is not authenticated.")
	elif ret == 0:
		output.print_ok("No SMTP relay has been set up (but that's ok since port 25 is not blocked).")
	else:
		output.print_error("No SMTP relay has been set up. Since port 25 is blocked, you will probably not be able to send any mail.")
Example #20
def smtp_relay_set():
    from editconf import edit_conf
    config = utils.load_settings(env)
    newconf = request.form
    try:
        # Write on daemon settings
        config["SMTP_RELAY_ENABLED"] = (newconf.get("enabled") == "true")
        config["SMTP_RELAY_HOST"] = newconf.get("host")
        config["SMTP_RELAY_AUTH"] = (newconf.get("auth_enabled") == "true")
        config["SMTP_RELAY_USER"] = newconf.get("user")
        utils.write_settings(config, env)
        # Write on Postfix configs
        edit_conf("/etc/postfix/main.cf", [
            "relayhost=" +
            (f"[{config['SMTP_RELAY_HOST']}]:587" if
             config["SMTP_RELAY_ENABLED"] else ""), "smtp_sasl_auth_enable=" +
            ("yes" if config["SMTP_RELAY_AUTH"] else "no"),
            "smtp_sasl_security_options=" +
            ("noanonymous" if config["SMTP_RELAY_AUTH"] else "anonymous"),
            "smtp_sasl_tls_security_options=" +
            ("noanonymous" if config["SMTP_RELAY_AUTH"] else "anonymous")
        ],
                  delimiter_re=r"\s*=\s*",
                  delimiter="=",
                  comment_char="#")
        if config["SMTP_RELAY_AUTH"]:
            # Edit the sasl password
            with open("/etc/postfix/sasl_passwd", "w") as f:
                f.write(
                    f"[{config['SMTP_RELAY_HOST']}]:587 {config['SMTP_RELAY_USER']}:{newconf.get('key')}\n"
                )
            utils.shell("check_output",
                        ["/usr/bin/chmod", "600", "/etc/postfix/sasl_passwd"],
                        capture_stderr=True)
            utils.shell("check_output",
                        ["/usr/sbin/postmap", "/etc/postfix/sasl_passwd"],
                        capture_stderr=True)
        # Restart Postfix
        return utils.shell("check_output",
                           ["/usr/bin/systemctl", "restart", "postfix"],
                           capture_stderr=True)
    except Exception as e:
        return (str(e), 500)
Example #21
    def run_random_round(self, out_file=None):
        if self.full_log is not None:
            self.full_log.info("Running round with random teams...")
            self.full_log.debug("Shuffling words")
        if self.game_log is not None:
            self.game_log.info("Running round with random teams...")

        shuffle(self.game_words)
        word_gen = cycle(self.game_words)

        if self.full_log is not None:
            self.full_log.debug("Loading team sizes...")
        team_sizes = load_settings(sett_file="settings/team_sizes.txt",
                                   default_dict={
                                       "t": 8,
                                       "o": 8,
                                       "k": 1,
                                       "b": 8
                                   })
        if self.full_log is not None:
            self.full_log.debug("Team sizes: {0}".format(" - ".join(
                [k + ":" + str(team_sizes[k]) for k in ["t", "o", "b", "k"]])))

        if self.full_log is not None:
            self.full_log.debug("Generating team words...")
        self.team_words = {
            team: [next(word_gen) for i in range(team_sizes[team])]
            for team in ["t", "o", "b", "k"]
        }
        if self.full_log is not None:
            self.full_log.info("Team words:\n{0}".format("\n".join([
                team + ": " + ", ".join(self.team_words[team])
                for team in ["t", "o", "b", "k"]
            ])))
        if self.game_log is not None:
            self.game_log.info("Randomly generated teams are:\n{0}".format(
                "\n".join([
                    team + ": " + ", ".join(self.team_words[team])
                    for team in ["t", "o", "b", "k"]
                ])))

        return self.__run_round(out_file=out_file)
Example #22
def check_miab_version(env, output):
	config = load_settings(env)

	try:
		this_ver = what_version_is_this(env)
	except:
		this_ver = "Unknown"

	if config.get("privacy", True):
		output.print_warning("You are running version Mail-in-a-Box %s. Mail-in-a-Box version check disabled by privacy setting." % this_ver)
	else:
		latest_ver = get_latest_miab_version()

		if this_ver == latest_ver:
			output.print_ok("Mail-in-a-Box is up to date. You are running version %s." % this_ver)
		elif latest_ver is None:
			output.print_error("Latest Mail-in-a-Box version could not be determined. You are running version %s." % this_ver)
		else:
			output.print_error("A new version of Mail-in-a-Box is available. You are running version %s. The latest version is %s. For upgrade instructions, see https://mailinabox.email. "
				% (this_ver, latest_ver))
Example #23
def check_miab_version(env, output):
	config = load_settings(env)

	try:
		this_ver = what_version_is_this(env)
	except:
		this_ver = "Unknown"

	if config.get("privacy", True):
		output.print_warning("You are running version Mail-in-a-Box %s. Mail-in-a-Box version check disabled by privacy setting." % this_ver)
	else:
		latest_ver = get_latest_miab_version()

		if this_ver == latest_ver:
			output.print_ok("Mail-in-a-Box is up to date. You are running version %s." % this_ver)
		elif latest_ver is None:
			output.print_error("Latest Mail-in-a-Box version could not be determined. You are running version %s." % this_ver)
		else:
			output.print_error("A new version of Mail-in-a-Box is available. You are running version %s. The latest version is %s. For upgrade instructions, see https://wspecsbox.email. "
				% (this_ver, latest_ver))
Example #24
def create_model():
    possible_models = [
        "shufflenet_v2_x1_0",
        "resnet18",
        "mobilenet_v2",
        "mnasnet1_0",
        "mnasnet1_3",
    ]

    settings = utils.load_settings()
    model_name = random.choice(possible_models)
    print("Test:: Model: {}".format(model_name))
    model = getattr(torchvision.models, model_name)()

    script_model = torch.jit.script(model)
    script_model.save(os.environ["MODEL"])

    settings["model_name"] = model_name
    with open(os.environ["SETTINGS"], "w") as file:
        yaml.dump(settings, file, default_flow_style=False)
Example #25
    def init_vars(self, prev_sc):
        # 1. Initializing variables
        # a. loading the settings
        print("| Loading settings...           |")
        self.settings = utils.load_settings()
        print("| -- loaded settings...         |\n|", self.settings)

        # b. starting necessary variables
        print("| Loading experiment variable...|")
        self.experiment = prev_sc.experiment

        # c. initializing round variables
        print("| Loading round variables       |")
        self.clicks = ''
        self.points = tkinter.StringVar()
        self.points.set(0)
        self.repeat = 0
        self.reinforcement = []

        # d. game variables
        print("| Loading game variables...     |")
        self.combinations = ['EEEE','EEED','EEDE','EDEE',\
        'DEEE','EEDD','EDDE','DDEE','DEED','DEDE','EDED','DDDE',\
        'DDED','DEDD','EDDD','DDDD']
        self.frequency = {'EEEE':1,'EEED':1,'EEDE':1,'EDEE':1,\
        'DEEE':1,'EEDD':1,'EDDE':1,'DDEE':1,'DEED':1,'DEDE':1,'EDED':1,'DDDE':1,\
        'DDED':1,'DEDD':1,'EDDD':1,'DDDD':1}
        self.total_frequency = {'EEEE':1,'EEED':1,'EEDE':1,'EDEE':1,\
        'DEEE':1,'EEDD':1,'EDDE':1,'DDEE':1,'DEED':1,'DEDE':1,'EDED':1,'DDDE':1,\
        'DDED':1,'DEDD':1,'EDDD':1,'DDDD':1}
        self.memo_accuracy = 0
        self.memo_reinforced = []
        self.saved_order = []
        self.stages = []

        # e. result variables
        print("| Loading result variables...   |")
        self.results = []
        self.result_set = set()
        self.blocks = []
Example #26
def init():
    """Fonction d'initialisation de l'application"""
    # D'abord on vérifie l'existence de tous les répertoires utiles
    repertories = [
        "scenes", "assets", "saves", "assets/backgrounds", "assets/musics"
    ]
    for rep in repertories:
        if not os.path.isdir(rep):
            os.mkdir(rep)

    # Then build our pygame window
    pygame.init()
    pygame.font.init()
    pygame.mixer.init()

    screen = pygame.display.set_mode((1280, 720))

    cfg = utils.load_settings()
    if cfg["fullscreen"] > 0:
        pygame.display.toggle_fullscreen()

    return screen
Example #27
import sys, os, h5py
import numpy as np
import tensorflow as tf
from rich.console import Console

console = Console()

sys.path.append(r"/home/qnl/Git-repositories")
from utils import load_settings, load_repackaged_data, get_data, split_data_same_each_time, dark_mode_compatible
from qutrit_lstm_network import pad_labels

dark_mode_compatible(dark_mode_color=r'#86888A')

settings = load_settings(
    r"/home/qnl/Git-repositories/qnl_nonmarkov_ml/qutrit_lstm/settings.yaml")

# NOTE: most of the settings below must match the settings in prep.py
# Path that contains the training/validation dataset.
filepath = settings['voltage_records']['filepath']
filename = settings['voltage_records']['filename']

# last_timestep determines the length of trajectories used for training in units of strong_ro_dt.
# Must be <= the last strong readout point
mask_value = settings['training'][
    'mask_value']  # This is the mask value for the data, not the missing labels
num_features = settings['voltage_records']['num_features']  # I and Q
strong_ro_dt = settings['voltage_records'][
    'strong_ro_dt']  # Time interval for strong readout in the dataset in seconds

console.print("Loading data...", style="bold red")
Example #28
        weight
      ))

    del imgs[:fps_ratio]

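    # Keep a rolling window of per-frame processing times to smooth the FPS and ETA estimates.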
    elapsed_time = time.process_time()-time_start
    time_list.pop(0)
    time_list.append(elapsed_time)
    average_time = sum(time_list) / len(time_list)

    print('Performance:', '%.3f' % average_time,
          'seconds/frame -', '%.3f' % (1/average_time), 'FPS'
    )
    print('Estimation:', time.strftime('%H:%M:%S', time.gmtime(math.ceil(average_time*int(output_frames-i)))))
    print(f'Progress: {i}/{output_frames} -', '%.3f' % (100*i/output_frames), '%')

  output_video.release()
  input_video.release()

  utils.add_audio(input_name, output_name)

  if settings['cv_colourfix']:
    Path(input_name).unlink()
    Path(f'to-fix_{input_name}').rename(input_name)


if __name__ == '__main__':
  settings = utils.load_settings()
  print(settings)
  process_video(settings)
Example #29
    def load_complete_tree(self, filter_enabled=True, complete=True):
        '''Creates a folder hash table and another, non-folder hash table.
        The first stores folder nodes with the id as the key (e.g. /MyDrive/Books/Fiction);
        the second simply stores the file struct with the id as the key (e.g. Star Wars.pdf).

        :param filter_enabled: if whitelist or blacklist is enabled.
        :type filter_enabled: bool.
        :param complete: If will link files to tree.
        :type complete: bool.
        '''
        whitelist = blacklist = None
        if filter_enabled:
            settings = load_settings()
            if settings['whitelist-enabled']:
                whitelist = settings['whitelist-files']
            elif settings['blacklist-enabled']:
                blacklist = settings['blacklist-files']

        # =========== debug code ===========
        # just to keep local query to not request files every run
        # if not os.path.exists('folders.dat'):
        #     query = 'trashed = false and mimeType = "%s"' % TYPES['folder']
        #     fields = 'nextPageToken, files(name, id, parents, mimeType)'
        #     folders_metadata = []
        #     pageToken = None
        #     while True:
        #         result = self.service.files().list(q=query,\
        #                                         fields=fields,\
        #                                         pageToken=pageToken,\
        #                                         pageSize=1000).execute()
        #         folders_metadata += result.get('files', [])
        #         pageToken = result.get('nextPageToken')
        #         if not pageToken:
        #             break
        #     with open('folders.dat', 'wb') as f:
        #         pickle.dump(folders_metadata, f, pickle.HIGHEST_PROTOCOL)
        # else:
        #     with open('folders.dat', 'rb') as f:
        #         folders_metadata = pickle.load(f)
        # =========== debug code ===========

        # =========== real code ===========
        query = 'trashed = false and mimeType = "%s"' % TYPES['folder']
        folders_metadata = []
        pageToken = None
        while True:
            fields = 'nextPageToken, files(name, id, parents, mimeType)'
            result = self.service.files().list(q=query,\
                                               fields=fields,\
                                               pageToken=pageToken,\
                                               pageSize=1000).execute()
            folders_metadata += result.get('files', [])
            pageToken = result.get('nextPageToken')
            if not pageToken:
                break
        # =========== real code ===========
        # just the folders vector, will be converted to a hash below
        folders = [f for f in folders_metadata\
                   if 'parents' in f]
        self.root.children = []  # empty tree
        stack = []  # [metadata]
        self.folders_hash = {}
        i = 0  # used to pin the node that is looking for a parent
        j = 0  # used to pin the next node that will look for the parent
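        # Build the folder tree: push folders[i] onto the stack while its own parent is still
        # unprocessed; otherwise attach it under the root or under an already-hashed parent and
        # unwind the stack to attach its descendants, applying the white/blacklist filters.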
        while folders or stack:
            enqueue = None
            j = 0
            for folder in folders:
                if folders[i]['parents'][0] == folder['id']:
                    enqueue = folders[i]
                    break
                j += 1

            if enqueue:
                stack.append(enqueue)
                folders.pop(i)
                if j < i:
                    i = j
                else:
                    i = j - 1
            elif folders[i]['parents'][0] == self.root.get_id():
                title = ('/' + folders[i]['name'] + '/')
                if (blacklist and title in blacklist)\
                or (whitelist and not any(title in elem for elem in whitelist)):
                    stack = []
                    folders.pop(i)
                    i = 0
                    continue
                child = DriveFile(self.root, folders[i])
                self.folders_hash[folders[i]['id']] = child

                while stack:
                    item = stack.pop()
                    title = title + '/' + item['name'] + '/'
                    if (blacklist and (title in blacklist)) \
                    or (whitelist and not \
                    any(elem in title for elem in whitelist)):
                        stack = []
                        break
                    parent = child
                    child = DriveFile(parent, item)
                    self.folders_hash[item['id']] = child
                folders.pop(i)
                i = 0
            else:
                parent_id = folders[i]['parents'][0]
                if not parent_id in self.folders_hash:
                    stack = []
                    folders.pop(i)
                    i = 0
                    continue
                elif filter_enabled:
                    title = self.folders_hash[parent_id].get_path(
                    ) + folders[i]['name'] + '/'
                    if (blacklist and (title in blacklist))\
                    or (whitelist and not\
                    any(elem in title for elem in whitelist)):
                        stack = []
                        folders.pop(i)
                        i = 0
                        continue

                child = DriveFile(self.folders_hash[parent_id], folders[i])
                self.folders_hash[child.get_id()] = child
                while stack:
                    parent = child
                    item = stack.pop()
                    if (blacklist and (title in blacklist))\
                    or (whitelist and not \
                    any(elem in title for elem in whitelist)):
                        stack = []
                        break
                    child = DriveFile(parent, item)
                    self.folders_hash[item['id']] = child
                folders.pop(i)
                i = 0
        if complete:
            if self.folders_hash:
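                # Batch the folder ids into several '"<id>" in parents' queries so each
                # query string stays below the API's complexity limit.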
                parents_query = [
                    'mimeType != \'%s\' and ("%s" in parents' %
                    (TYPES['folder'], list(self.folders_hash)[0])
                ]
                i = 0  # counter
                j = 0  # index of the list
                for item in list(self.folders_hash)[1:]:
                    adding = '"%s" in parents' % item
                    # 30000 is the max body size before too complex query
                    if len(' or ' + parents_query[j]) >= 25000:
                        parents_query[j] += ')'
                        j += 1
                        i = 0
                        parents_query.append(
                            'mimeType != \'%s\' and ("%s" in parents' %
                            (TYPES['folder'], item))
                        continue
                    parents_query[j] += ' or ' + adding
                    i += 1
                parents_query[j] += ')'
            else:
                print('no folders found')
                return ()
            fields = 'nextPageToken, files(name, id, parents, mimeType)'
            pageTokens = [None] * len(parents_query)
            files_metadata = []
            while True:
                for i, query in enumerate(parents_query):
                    if pageTokens[i] != '0':
                        result = self.service.files().list(q=query,\
                                                        fields=fields,\
                                                        pageToken=pageTokens[i],\
                                                        pageSize=1000).execute()
                        files_metadata += result.get('files', [])
                        pageTokens[i] = result.get('nextPageToken')
                        if not pageTokens[i]:
                            pageTokens[i] = '0'
                if all(token == '0' for token in pageTokens):
                    break

            for metadata in files_metadata:
                if not metadata['parents'][0] == self.root.get_id():
                    parent = self.folders_hash[metadata['parents'][0]]
                else:
                    if filter_enabled:
                        continue
                    parent = self.root
                DriveFile(parent, metadata)
Example #30
def privacy_status_get():
	config = utils.load_settings(env)
	return json_response(config.get("privacy", True))
Example #31
def privacy_status_set():
	config = utils.load_settings(env)
	config["privacy"] = (request.form.get('value') == "private")
	utils.write_settings(config, env)
	return "OK"
Example #32
def privacy_status_set():
	config = utils.load_settings(env)
	config["privacy"] = (request.form.get('value') == "private")
	utils.write_settings(config, env)
	return "OK"
Example #33
def privacy_status_get():
	config = utils.load_settings(env)
	return json_response(config.get("privacy", True))
Example #34
    def _load_settings(self):
        for prov in self.provider.values():
            prov.load_credentials()

        plugin = utils.load_settings("plugin")
        self._set_chosen_plugin(plugin)
Example #35
def get_default_quota(env):
	config = utils.load_settings(env)
	return config.get("default-quota", '0')
Example #36
        err = "PID dir '%s' does not exist!" % pid_dir
        logger.critical(err)
        raise Exception(err)
    else:
        logger.info("PID dir '%s' exists." % pid_dir)

    pid_dir_owner = getpwuid(os.stat(pid_dir).st_uid).pw_name
    logger.info("PID dir '%s' is owned by '%s'." % (pid_dir, pid_dir_owner))
    if pid_dir_owner != os.environ["USER"]:
        logger.warn("PID dir owner is not the current user!")


if __name__ == "__main__":

    logger = get_logger()
    settings = load_settings()

    parser = OptionParser()
    parser.add_option(
        "-i",
        dest="interactive",
        help="Run the server in 'interactive' mode (print output to STDOUT)",
        default=False,
        action="store_true")
    parser.add_option("-p",
                      dest="port",
                      help="Force the server to run on the specified port",
                      default=None,
                      metavar="9999")
    parser.add_option(
        "--action",
Example #37
def main():
    #---Load environment settings from SETTINGS.json in root directory and build filepaths for all base submissions---#
    settings = utils.load_settings('SETTINGS.json')
    base_filepaths = (settings['file_bryan_submission'],
                      settings['file_miroslaw_submission'])
    segment_weights = settings['ensemble_segment_weights']
    segments = list(segment_weights.keys())
    targets = list(segment_weights[segments[0]].keys())

    #---Output the segment weights to be used for ensemble averaging of base submissions---#
    log.info('==========ENSEMBLE WEIGHTS (B,M)============')
    for segment in segment_weights:
        log.info(segment.upper()+':')
        for target in segment_weights[segment]:
            log.info('    '+target.upper()+' -- ['+segment_weights[segment][target]['0']+','+
                      segment_weights[segment][target]['1']+']')

    #---Load each base submission to a list of dataframes---#
    base_subs = []
    for file in base_filepaths:
        try:
            base_subs.append(pd.read_csv(file).set_index(['id'], drop=False).sort())
            log.info('Base submission successfully loaded: %s.' % file)
        except IOError:
            log.info('Base submission file does not exist: %s. Run base model to generate, or update filepath.' %file)
            sys.exit('---Exiting---')

    utils.line_break()

    #---Load id's labeled with segments to a dataframe used for segment based averaging---#
    file = settings['file_segment_ids']
    try:
        segment_ids = pd.read_csv(file)
        log.info('Segment IDs successfully loaded from: %s.' % file)
    except IOError:
        log.info('Segment IDs file does not exist: %s. Update filepath in SETTINGS.json.' % file)
    utils.line_break()

    #---Transform base predictions to log space prior to averaging, if selected in settings---#
    if settings['avg_log_space'] == 'y':
        log.info('Transforming base predictions to log space prior to averaging.')
        for i in range(len(base_subs)):
            for target in targets:
                base_subs[i][target] = np.log(base_subs[i][target]+1)
        utils.line_break()

    #---Apply segment based weights to each base submission then combine them to create ensemble submission---#
    log.info('Applying segment weights to base submissions then combining to create ensemble.')
    for i in range(len(base_subs)):
        #Merge the segment labels from the segment id's file with the base submission dataframe
        base_subs[i] = base_subs[i].merge(segment_ids,on='id',how='inner')
        for segment in segments:
            for target in targets:
                base_subs[i][target][base_subs[i]['Segment'] == segment] \
                    *= float(segment_weights[segment][target][str(i)])
        del base_subs[i]['Segment']
    ensemble_sub = base_subs[0].ix[:]
    for i in range(len(base_subs)-1):
        for target in targets:
            ensemble_sub[target] += base_subs[i+1][target]
    utils.line_break()

    #---Transform ensemble predictions back to normal, if use log space averaging was selected in settings---#
    if settings['avg_log_space'] == 'y':
        log.info('Transforming ensemble predictions back to normal from log space.')
        for target in targets:
            ensemble_sub[target] = np.exp(ensemble_sub[target])-1
        utils.line_break()

    #---Apply any final target scalars to ensemble predictions---#
    for target in targets:
        ensemble_sub[target] *= float(settings['target_scalars'][target])

    #---Output ensemble submission to directory set in SETTINGS.json, appending creation date and time---#
    timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
    filename = settings['dir_ensemble_submissions']+'ensemble_predictions_'+timestamp+'.csv'
    ensemble_sub.to_csv(filename, index=False)
    log.info('Ensemble submission saved: %s' % filename)
    utils.line_break()

    #End main
    log.info('Program executed successfully without error! Exiting.')
Example #38
def main():
    log.info('********New program instance started********')

    #-------------Load Environment----------------------#
    #Get program settings and model settings from SETTINGS.json file in root directory
    settings, model_settings = utils.load_settings()

    #If not using cached data, then load raw data, clean/munge it, create hand-crafted features, slice it for CV
    if settings['use_cached_data'] == 'y':
        log.info('==========LOADING CACHED FEATURES===========')
        dfTrn = data_io.load_cached_object('dfTrn')
        dfTest = data_io.load_cached_object('dfTest')
        dfCV = data_io.load_flatfile_to_df('Data/CV.csv')
    else:
        #-------Data Loading/Cleaning/Munging------------#
        #Load the data
        log.info('===============LOADING DATA=================')
        dfTrn = data_io.load_flatfile_to_df(settings['file_data_train'])
        dfTest = data_io.load_flatfile_to_df(settings['file_data_test'])
        dfCV = data_io.load_flatfile_to_df('Data/CV.csv')

        #Clean/Munge the data
        log.info('=======CLEANING AND MUNGING DATA============')
        dfTrn = munge.clean(dfTrn)
        dfTest = munge.clean(dfTest)

        #-------Feature creation-------------------------#
        #Add all currently used hand crafted features to dataframes
        log.info('====CREATING HAND-CRAFTED DATA FEATURES=====')
        features.add(dfTrn)
        features.add(dfTest)

        #---------Data slicing/parsing--------------------------#
        #Split data for CV
        if settings['generate_cv_score'] == 'y':
            log.info('=====SPLITTING DATA FOR CROSS-VALIDATION====')
            if settings['cv_method'] == 'april':
                dfTrnCV, dfTestCV = munge.temporal_split(dfTrn, (2013, 4, 1))
            elif settings['cv_method'] == 'march':
                #take an addtional week from February b/c of lack of remote_api source issues in March
                dfTrnCV, dfTestCV = munge.temporal_split(dfTrn, (2013, 2, 21))
            elif settings['cv_method'] == 'list_split':
                #load stored list of data points and use those for CV
                dfCVlist = pd.DataFrame({'id': data_io.load_cached_object('Cache/cv_issue_ids.pkl'), 'dummy': 0})
                dfTrnCV, dfTestCV = munge.list_split(dfTrn, dfCVlist)

    #--------------Modeling-------------------------#
    #If cached models exist then load them for reuse into segment_models.  Then run through model_settings and for
    # each model where 'use_cached_model' is false then clear the cached model and recreate it fresh
    log.info('=========LOADING CACHED MODELS==============')
    segment_models = data_io.load_cached_object('segment_models')
    if segment_models == None:
        log.info('=========CACHED MODELS NOT LOADED===========')
        for model in model_settings:
            model['use_cached_model'] = 'n'
        segment_models = []
    #Initialize new model for models not set to use cache
    log.info('=======INITIALIZING UN-CACHED MODELS========')
    index = 0
    for model in model_settings:
        if model_settings[model]['use_cached_model'] == 'n':
            new_model = ensembles.Model(model_name=model,target=model_settings[model]['target'],
                                        segment=model_settings[model]['segment'],
                                        estimator_class=model_settings[model]['estimator_class'],
                                        estimator_params=model_settings[model]['estimator_params'],
                                        features=model_settings[model]['features'],
                                        postprocess_scalar=model_settings[model]['postprocess_scalar'])
            #Flag the model as not cached, so that it does not get skipped when running the modeling process
            new_model.use_cached_model='n'
            #Project specific model attributes not part of base class
            new_model.KNN_neighborhood_threshold=model_settings[model]['KNN_neighborhood_threshold']
            new_model.sub_zip_neighborhood=model_settings[model]['sub_zip_neighborhood']
            segment_models[index] = new_model
            log.info('Model %s initialized at index %i' % (model, index))
        index += 1

    #Cross validate all segment models (optional)
    if settings['export_cv_predictions_all_models'] == 'y' or settings['export_cv_predictions_new_models'] == 'y':
        log.info('============CROSS VALIDATION================')
        for model in segment_models[:]:
            #If model has cached CV predictions then skip predicting and just export them (if selected in settings)
            if hasattr(model,'dfCVPredictions'):
                log.info('Cached CV predictions found.  Using cached CV predictions.')
                if settings['export_cv_predictions_all_models'] == 'y':
                    data_io.save_predictions(model.dfCVPredictions,model.target,model_name=model.model_name,
                                             directory=settings['dir_submissions'],
                                             estimator_class=model.estimator_class, note='CV_list')
            else:
                print_model_header(model)
                #Prepare segment model:  segment and create feature vectors for the CV data set
                dfTrn_Segment, dfTest_Segment = prepare_segment_model(dfTrnCV,dfTestCV,model)
                #Generate CV predictions
                train.cross_validate(model, settings, dfTrn_Segment, dfTest_Segment)
                #Cache the CV predictions as a dataframe stored in each segment model
                model.dfCVPredictions = dfTest_Segment.ix[:,['id',model.target]]
                if settings['export_cv_predictions_new_models'] == 'y':
                    data_io.save_predictions(model.dfCVPredictions,model.target,model_name=model.model_name,
                                             directory=settings['dir_submissions'],
                                             estimator_class=model.estimator_class, note='CV_list')

    #Generate predictions on test set for all segment models (optional)
    if settings['export_predictions_all_models'] == 'y' or settings['export_predictions_new_models'] == 'y'\
        or settings['export_predictions_total'] == 'y':
        log.info('=======GENERATING TEST PREDICTIONS==========')
        for model in segment_models[:]:
            #If model has cached test predictions then skip predicting and just export them (if selected in settings)
            if hasattr(model,'dfPredictions'):
                log.info('Cached test predictions found for model %s.  Using cached predictions.' % model.model_name)
                if settings['export_predictions_all_models'] == 'y':
                    data_io.save_predictions(model.dfPredictions,model.target,model_name=model.model_name,
                             directory=settings['dir_submissions'],
                             estimator_class=model.estimator_class,note='TESTset')
            else:
                print_model_header(model)
                #Prepare segment model:  segment and create feature vectors for the full TEST data set
                dfTrn_Segment, dfTest_Segment = prepare_segment_model(dfTrn,dfTest,model)
                #Generate TEST set predictions
                model.predict(dfTrn_Segment, dfTest_Segment)
                if settings['export_predictions_all_models'] == 'y' or settings['export_predictions_new_models'] == 'y':
                    data_io.save_predictions(model.dfPredictions,model.target,model_name=model.model_name,
                                             directory=settings['dir_submissions'],
                                             estimator_class=model.estimator_class,note='TESTset')
                log.info(utils.line_break())

    #Cache the trained models and predictions to file (optional)
    if settings['export_cached_models'] == 'y':
        log.info('==========EXPORTING CACHED MODELS===========')
        data_io.save_cached_object(segment_models,'segment_models')

    #Merge each segment model's CV predictions into a master dataframe and export it (optional)----#
    if settings['export_cv_predictions_total'] == 'y':
        log.info('====MERGING CV PREDICTIONS FROM SEGMENTS====')
        dfTestPredictionsTotal = merge_segment_predictions(segment_models, dfTestCV, cv=True)
        #---Apply post process rules to master dataframe---#
        #Set all votes and comments for remote_api segment to 1 and 0
        dfTestPredictionsTotal = dfTestPredictionsTotal.merge(dfTest.ix[:][['source','id']], on='id', how='left')
        for x in dfTestPredictionsTotal.index:
            if dfTestPredictionsTotal.source[x] == 'remote_api_created':
                dfTestPredictionsTotal.num_votes[x] = 1
                dfTestPredictionsTotal.num_comments[x] = 0
        #Export
        timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
        filename = 'Submits/'+timestamp+'--bryan_CV_predictions.csv'
        dfTestPredictionsTotal.to_csv(filename)


    #Merge each segment model's TEST predictions into a master dataframe and export it (optional)----#
    if settings['export_predictions_total'] == 'y':
        log.info('===MERGING TEST PREDICTIONS FROM SEGMENTS===')
        dfTestPredictionsTotal = merge_segment_predictions(segment_models, dfTest)
        #---Apply post process rules to master dataframe---#
        #Set all votes and comments for remote_api segment to 1 and 0
        dfTestPredictionsTotal = dfTestPredictionsTotal.merge(dfTest.ix[:][['source','id']], on='id', how='left')
        for x in dfTestPredictionsTotal.index:
            if dfTestPredictionsTotal.source[x] == 'remote_api_created':
                dfTestPredictionsTotal.num_votes[x] = 1
                dfTestPredictionsTotal.num_comments[x] = 0
        del dfTestPredictionsTotal['source']
        #Export
        filename = 'bryan_test_predictions.csv'
        data_io.save_combined_predictions(dfTestPredictionsTotal, settings['dir_submissions'], filename)

    #End main
    log.info('********Program ran successfully. Exiting********')