def get_control_timestamp(options, control_file_name="crawl-timestamp.txt"):
    control_dir = get_control_dir(options)

    backend = get_backend(control_dir)

    if backend == "fs":
        try:
            filename = "{}/{}".format(control_dir, control_file_name)
            return slurp(filename)
        except FileNotFoundError:
            # No timestamp has been recorded yet.
            return None
    # Non-"fs" backends fall through and return None implicitly.
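
A hedged usage sketch: options, get_control_dir and get_backend come from the surrounding project and are assumed here.

# Hypothetical call site; options would come from the project's own
# argument parsing.
timestamp = get_control_timestamp(options)
if timestamp is None:
    print("no crawl timestamp recorded yet")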
Example #2
    def load_test_data(cls, file, serializer_class=None,
                       username="******", type="JSON"):

        logging.info("Loading data from file {}".format(file))

        request = RequestFactory().get('./fake_path')
        request.user = User.objects.get(username=username)

        if type == "JSON":
            data = json.loads(slurp(file))

            if serializer_class:
                logging.info("Validating data using serializer {}".
                             format(serializer_class))
                has_errors = False
                errors_by_resource = {}
                for index, resource in enumerate(data):
                    serializer = serializer_class(
                        data=resource, context={'request': request})
                    if not serializer.is_valid():
                        has_errors = True
                        errors_by_resource["{}".format(index)] = \
                            serializer.errors

                if has_errors:
                    raise AssertionError(
                        "There are some errors in the json data of the test",
                        errors_by_resource)

            return data
        elif type == "CSV":
            data = []
            with open(file, 'r') as f:
                reader = csv.DictReader(f)
                for master_value in reader:
                    data.append(master_value)
            return data
        else:
            raise AssertionError(
                "Invalid file type '{0}'. Valid types are: [{1}]".
                format(type, ", ".join(["JSON", "CSV"])))


from itertools import groupby

from spitslurp import slurp  # assumed import, as in the other examples


def is_valid(phrase):
    # A passphrase is valid when no word appears more than once.
    words = phrase.split()
    freq = [len(list(group)) for key, group in groupby(sorted(words))]
    dups = [count for count in freq if count > 1]
    return len(dups) == 0


assert is_valid('aa bb cc dd ee') is True
assert is_valid('aa bb cc dd aa') is False
assert is_valid('aa bb cc dd aaa') is True


def is_valid_anagram(phrase):
    words = phrase.split()
    words = [''.join(sorted(word)) for word in words]
    freq = [len(list(group)) for key, group in groupby(sorted(words))]
    dups = [count for count in freq if count > 1]
    return len(dups) == 0


assert is_valid_anagram('abcde fghij') is True
assert is_valid_anagram('abcde xyz ecdab') is False
assert is_valid_anagram('a ab abc abd abf abj') is True
assert is_valid_anagram('iiii oiii ooii oooi oooo') is True
assert is_valid_anagram('oiii ioii iioi iiio') is False

phrases = slurp('day04.txt').strip()

print(len([phrase for phrase in phrases.split('\n') if is_valid(phrase)]))
print(len(
    [phrase for phrase in phrases.split('\n') if is_valid_anagram(phrase)]))
#!/usr/bin/env python3

import json
import os
from os.path import realpath, dirname
from html import escape
from mako.template import Template
from spitslurp import spit, slurp

script_path = dirname(realpath(__file__))
os.chdir(script_path)

dest_dir = "../dist"
src_dir  = "../src"

conf = json.loads(slurp('../address_app.conf.json'))
port    = conf['port']
host    = conf['host']
api_key = conf['browser_gmap_api_key']

templ = Template(filename = 'address_app.html.mako')
address_app_html = templ.render(api_key=api_key)
spit (dest_dir + '/address_app.html', address_app_html)

templ = Template(filename = 'source_code.html.mako')
spit (dest_dir + '/source_code.html', templ.render(
  address_app_html  = escape(address_app_html),
  ajax_address_js   = escape(slurp (src_dir + '/AjaxAddress.js')),
  list_addresses_js = escape(slurp (src_dir + '/ListAddresses.js')),
  address_map_js    = escape(slurp (src_dir + '/AddressMap.js')),
  util_js           = escape(slurp (src_dir + '/util.js')),
))

Example #5
    def test_default(self):
        with tempfile.NamedTemporaryFile() as temp_file:
            txt = u'Hello\nWorld\n'
            self.assertEqual(spit(temp_file.name, txt), txt)
            self.assertEqual(slurp(temp_file.name), txt)
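
For readers without the library installed, a minimal stand-in for spitslurp's two helpers is sketched below. It assumes UTF-8 text files and mirrors the round-trip behaviour the test above checks (spit returns the text it wrote); this is a hypothetical fallback, not the library's actual source.

import io


def slurp(path, encoding='utf-8'):
    # Read a whole file into one string.
    with io.open(path, 'r', encoding=encoding) as f:
        return f.read()


def spit(path, text, encoding='utf-8'):
    # Write text to a file, replacing existing content, and return it.
    with io.open(path, 'w', encoding=encoding) as f:
        f.write(text)
    return text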

import json

from spitslurp import slurp  # assumed import, as in the other examples


def next_idx(length, idx):
    # Assumed helper (the original definition was cut off by the scrape):
    # advance to the next memory bank, wrapping past the last one.
    return (idx + 1) % length


def balance(memory):
    # Redistribute the blocks of the fullest bank across all the banks.
    length = len(memory)
    largest = max(memory)
    idx = memory.index(largest)
    # print largest, idx
    memory[idx] = 0
    while largest > 0:
        idx = next_idx(length, idx)
        # print idx
        memory[idx] = memory[idx] + 1
        largest = largest - 1
    return memory


# print memory
# print balance(memory)

memory = list(map(int, slurp('day06.txt').split()))
# Track serialised states so repeated configurations can be detected.
states = {json.dumps(memory)}
steps = 0
while True:
    memory = balance(memory)
    steps = steps + 1
    if json.dumps(memory) in states:
        break
    else:
        states.add(json.dumps(memory))

print('Part 1:', steps)

# Part 2 - steps between identical states

memory = list(map(int, slurp('day06.txt').split()))
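
The scrape cuts the snippet off here. Below is a sketch of how Part 2 (the number of steps between the two sightings of the repeated state) can be finished, assuming a dict that records the step at which each state was first seen; this completion is an assumption, not the original code.

seen = {json.dumps(memory): 0}
steps = 0
while True:
    memory = balance(memory)
    steps = steps + 1
    key = json.dumps(memory)
    if key in seen:
        # The gap between the two visits is the cycle length.
        print('Part 2:', steps - seen[key])
        break
    seen[key] = steps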
Example #7
from spitslurp import slurp  # assumed import, as in the other examples


def escape(offsets):
    # Follow the jump offsets, incrementing each offset after it is used,
    # and count the steps taken until the index jumps outside the list.
    steps = 0
    idx = 0
    try:
        while True:
            val = offsets[idx]
            # print 'val', val
            offsets[idx] = val + 1
            # print 'offsets', offsets
            idx = idx + val
            steps = steps + 1
    except IndexError:
        pass
    return steps

assert escape([0, 3, 0, 1, -3]) == 5

print('Part 1:',
      escape(list(map(int, slurp('day05.txt').strip().split('\n')))))


def escape2(offsets):
    steps = 0
    idx = 0
    try:
        while True:
            val = offsets[idx]
            # print 'val', val
            if val >= 3:
                offsets[idx] = val - 1
            else:
                offsets[idx] = val + 1
            # print 'offsets', offsets
            idx = idx + val
            steps = steps + 1
    except IndexError:
        # Jumping outside the list means we have escaped.
        pass
    return steps


# The snippet was cut off here; Part 2 presumably mirrors Part 1's call:
print('Part 2:',
      escape2(list(map(int, slurp('day05.txt').strip().split('\n')))))
Example #8
    def load_test_data(
        cls,
        file,
        serializer_class=None,
        username=None,
        type="JSON",
        return_pure_json=True,
        request=None,
        replace=None,
        serializer_action=None,
        context=None,
    ):

        logging.info("Loading data from file {}".format(file))

        if not username:
            username = cls.user.username

        if not request:
            request = RequestFactory().get("./fake_path")

        if not hasattr(request, "user") or not request.user:
            request.user = get_user_model().objects.get(username=username)

        if not context:
            context = {"request": request}

        if "request" not in context:
            context["request"] = request

        if type == "JSON":
            file_content = slurp(file)
            if replace:
                for term, value in replace.items():
                    if isinstance(value, (list, set)):
                        file_content = file_content.replace(
                            f"\"@@{term}@@\"", json.dumps(value))
                    else:
                        file_content = file_content.replace(
                            f"@@{term}@@", value)
            data = json.loads(file_content)

            if serializer_class:
                logging.info("Validating data using serializer {}".format(
                    serializer_class))
                has_errors = False
                errors_by_resource = {}
                object_json = []
                for index, resource in enumerate(data):
                    serializer = serializer_class(data=resource,
                                                  context=context)
                    if not serializer.is_valid():
                        has_errors = True
                        errors_by_resource["{}".format(
                            index)] = serializer.errors

                    elif not return_pure_json:
                        if not serializer_action:
                            instance = deserialize_instance(
                                serializer, serializer.Meta.model)
                            object_json.append(instance)
                        else:
                            object_json.append(
                                getattr(serializer, serializer_action)(
                                    serializer.validated_data))

                if has_errors:
                    raise AssertionError(
                        "There are some errors in the json data of the test",
                        errors_by_resource)
                elif not return_pure_json:
                    return object_json

            if return_pure_json:
                return data
        elif type == "CSV":
            data = []
            with open(file, "r") as f:
                reader = csv.DictReader(f)
                for master_value in reader:
                    data.append(master_value)
            return data
        else:
            raise AssertionError(
                "Invalid file type '{0}'. Valid types are: [{1}]".format(
                    type, ", ".join(["JSON", "CSV"])))


from functools import partial
from itertools import groupby

from spitslurp import slurp  # assumed import, as in the other examples


def find_unbalanced(nodes):
    # parse, find_node, sum_branch and get are helpers that the scrape cut
    # off; get(key, node) is assumed to return node[key].
    get_sum = partial(get, 'sum')
    for node in nodes:
        node['sum'] = sum_branch(nodes, node)
    for node in nodes:
        child_nodes = [find_node(nodes, node_name) for node_name in node['children']]
        if len(child_nodes):
            child_nodes = sorted(child_nodes, key=get_sum)
            freq = [[name, len(list(group))] for name, group in groupby(child_nodes, key=get_sum)]
            # print freq
            unbalanced = [name for name, count in freq if count == 1]
            expected_sum = [name for name, count in freq if count > 1][0]
            if len(unbalanced):
                unbalanced = unbalanced[0]
                unbalanced_node = [node for node in child_nodes if node['sum'] == unbalanced][0]
                return (unbalanced_node, expected_sum)
    return (None, None)


nodes = parse(slurp('day07_test.txt'))

# print sum_branch(nodes, find_node(nodes, 'tknk'))

nodes = parse(slurp('day07.txt'))
# print json.dumps(nodes, indent=2)
print('Part 1:', find_root(nodes))

(unbalanced_node, expected_sum) = find_unbalanced(nodes)
print('unbalanced_node:', unbalanced_node)
print('expected_sum:', expected_sum)
# The corrected weight adjusts the node's own weight by however much its
# subtree sum deviates from the sum its siblings agree on.
print('Part 2:',
      unbalanced_node['weight'] + (expected_sum - unbalanced_node['sum']))