예제 #1
0
def _task_create_kwargs(arguments):
    """Build the keyword dict for a new task record from parsed CLI arguments."""
    task = AttrDict(
        name=arguments['<name>'],
        state='pending',
        command=arguments['--command'],
        condition=arguments['--condition'],
        last=str(datetime.now()),
    )

    # A one-shot task is tagged by appending the marker to its timestamp.
    if arguments['--once']:
        task.last += '<once>'

    # Inheriting records the parent's name and marks any unspecified
    # fields so they resolve to the parent's values later.
    if arguments['--inherit']:
        parent = db.read(table='tasks', name=arguments['--inherit'])
        task.update(dict(
            parent=parent.name,
            schedule='<inherit>',
            command=task.command or '<inherit>',
            email='<inherit>',
            condition=task.condition or '<inherit>',
        ))

    task.update(_task_sched_kwargs(arguments))

    return task
예제 #2
0
def run_lstm_variant(variant='SlowLSTM', cuda=False, size=128, jit=False):
    """Benchmark one LSTM variant for a few iterations and return the Bench timer."""
    assert variant in lstms
    params = AttrDict({'cuda': cuda, 'lstm_kind': variant, 'size': size})

    bench_name = '{}_size{}{}{}'.format(variant, size, tag(cuda=cuda), tag(jit=jit))

    def to_device(obj):
        # Move a tensor/module to the GPU only when requested.
        return obj.cuda() if params.cuda else obj

    lstm_cls = getattr(lstm_variants, params.lstm_kind)
    x = V(to_device(th.rand(1, BATCH, params.size)))
    hiddens = (
        V(to_device(th.rand(1, BATCH, params.size))),
        V(to_device(th.rand(1, BATCH, params.size))),
    )
    th.manual_seed(1234)
    cus = to_device(lstm_cls(params.size, params.size, dropout=DROPOUT, jit=jit))
    if hasattr(cus, 'mask'):
        cus.mask = to_device(cus.mask)

    iter_timer = Bench(name=bench_name, cuda=cuda, warmup_iters=3)

    # Super slow on CPU, so run fewer timed iterations there.
    iters = 20 if cuda else 6
    for _ in range(iters):
        gc.collect()
        with iter_timer:
            out, h = x, hiddens
            for _step in range(SEQ_LEN):
                out, h = cus(out, h)

    return iter_timer
예제 #3
0
def read() -> AttrDict:
    """Load default settings, overlaid with the user's settings file.

    Returns:
        AttrDict with defaults from SETTINGS_DEFAULT, updated by any
        values found in SETTINGS_USER. Creates an empty SETTINGS_USER
        on first run if it does not exist.
    """
    with open(SETTINGS_DEFAULT) as f:
        # safe_load avoids arbitrary-object construction and keeps working
        # on PyYAML >= 6.0, where yaml.load() requires an explicit Loader.
        settings = AttrDict(yaml.safe_load(f))

    try:
        with open(SETTINGS_USER) as f:
            setting_dict = yaml.safe_load(f)

    except FileNotFoundError:
        # First run: create an empty user settings file for later edits.
        open(SETTINGS_USER, 'w').close()

    else:
        # An empty user file parses to None; only merge real content.
        if setting_dict is not None:
            settings.update(setting_dict)

    return settings
예제 #4
0
def _print_subtask(task, long, indent):
    """Print one task line, indented two spaces per nesting level."""
    task = AttrDict(task)
    print('  ' * indent, end='')

    if long:
        # Long form: every truthy field except the name and '~'-prefixed keys.
        details = ', '.join(
            '{}: {}'.format(key, value)
            for key, value in task.items()
            if value and key != 'name' and key[0] != '~'
        )
        print(task.name, details, end='')
    else:
        print('{}: {}'.format(task.name, task.state), end='')

    if '~summary' in task:
        print('', _subtask_summary_repr(task['~summary']))
    else:
        print()
예제 #5
0
def time_a_star(h, w, p, maps, runs):
    """Time plain A* on random maps and print the average runtime.

    Args:
        h, w: map height and width.
        p: obstacle probability used when generating random maps.
        maps: number of random maps to generate.
        runs: number of start/goal queries per map.
    """
    data = AttrDict({'h': l1_dist})

    # PEP 8 (E731): use def instead of assigning lambdas to names.
    def generate_map():
        return generate_random_map(h, w, p=p, start_and_goal=False)

    def algo(m, s, g, d):
        return a_star(m, s, g, d.h)

    avg_time, times, fc = experiment(algo,
                                     generate_map=generate_map,
                                     data=data,
                                     maps=maps,
                                     runs=runs)

    print('%e' % avg_time, fc)
예제 #6
0
def time_block_a_star(b_sz, h, w, p, maps, runs):
    """Time Block A* on random maps and print the average runtime.

    Args:
        b_sz: block size for the block map / local distance database.
        h, w: map height and width.
        p: obstacle probability used when generating random maps.
        maps: number of random maps to generate.
        runs: number of start/goal queries per map.
    """
    lddb, pathsdb = make_lddb(b_sz, from_file=True, save_to_file=False)

    data = AttrDict({
        'block_size': b_sz,
        'lddb': lddb,
        'pathsdb': pathsdb,
        'h': l1_dist
    })

    # PEP 8 (E731): use def instead of assigning lambdas to names.
    def generate_map():
        return generate_random_map(h, w, p=p, start_and_goal=False)

    def preprocessing(m, d):
        return BlockMap(m, d.block_size)

    def algo(m, s, g, d):
        return block_a_star(d.lddb, d.pathsdb, m, s, g, d.h)

    avg_time, times, fc = experiment(algo,
                                     generate_map=generate_map,
                                     data=data,
                                     preprocessing=preprocessing,
                                     maps=maps,
                                     runs=runs)

    print('%e' % avg_time, fc)
예제 #7
0
def experiment(algo,
               generate_map,
               data=None,
               preprocessing=lambda m, d: m,
               maps=500,
               runs=100,
               ignore_failures=False):
    """Time `algo` over randomly generated maps and start/goal pairs.

    Args:
        algo: callable (map, start, goal, data) -> (goal_found, path).
        generate_map: zero-arg callable producing a 2-D numpy obstacle map.
        data: optional mapping passed through to preprocessing/algo.
            Defaults to a fresh AttrDict per call; the original mutable
            default (`data=AttrDict()`) was shared across calls.
        preprocessing: callable (map, data) -> the search map given to algo.
        maps: number of random maps to generate.
        runs: number of start/goal queries per map.
        ignore_failures: kept for interface compatibility; failures are
            counted either way and timing continues regardless.

    Returns:
        (average time per query, list of per-query times, failure count).
    """
    if data is None:
        data = AttrDict()

    times = []
    failure_count = 0
    for i in range(maps):
        if i % 10 == 0:
            print(i)  # progress indicator

        _m = generate_map()
        m = _m.copy()

        h, w = _m.shape

        starts = [(np.random.randint(h), np.random.randint(w))
                  for _ in range(runs)]
        goals = [(np.random.randint(h), np.random.randint(w))
                 for _ in range(runs)]

        for start, goal in zip(starts, goals):
            # Restore obstacles from the pristine map, then clear the
            # start and goal cells so they are always traversable.
            m = m | _m
            m[start[0], start[1]] = 0
            m[goal[0], goal[1]] = 0
            Map = preprocessing(m, data)

            t1 = perf_counter()
            goal_found, path = algo(Map, start, goal, data)
            times.append(perf_counter() - t1)
            if not goal_found:
                failure_count += 1
                if ignore_failures:
                    continue  # NOTE(review): no-op — nothing follows in the loop

    return sum(times) / len(times), times, failure_count
예제 #8
0
def aggregateInstances(instances):
  """Fold a sequence of raw instance dicts into one meta instance."""
  meta = AttrDict(
    count = 0,
    firstOccurrence = None,
    lastOccurrence = None,
    lastMessage = None,
    backtrace = None,
    environments = set(),
    servers = set()
  )

  # Each raw instance contributes its count, occurrence dates, latest
  # message/backtrace, and environment/server sets to the accumulator.
  for raw in instances:
    aggregate(
      meta,
      int(raw['count']),
      parseDate(raw['firstOccurrence']),
      parseDate(raw['lastOccurrence']),
      raw['lastMessage'],
      raw['backtrace'],
      raw['environments'],
      raw['servers'])

  return meta
예제 #9
0
        self.data_batches = [
            Variable(cast(torch.zeros(p.batch_size, 28 * 28, 1)))
            for _ in range(p.num_batches)
        ]
        self.target_batches = [
            Variable(cast(torch.zeros(p.batch_size)).long())
            for _ in range(p.num_batches)
        ]
        if p.cuda:
            self.model.cuda()
            self.criterion.cuda()

    def time(self, p):
        """One timed pass: forward + backward over every prepared batch."""
        total_loss = 0
        for batch, labels in zip(self.data_batches, self.target_batches):
            predictions = self.model(batch)
            batch_loss = self.criterion(input=predictions, target=labels)
            batch_loss.backward()
            total_loss = total_loss + batch_loss.data  # CUDA sync point
        if p.cuda:
            torch.cuda.synchronize()

if __name__ == '__main__':
    # Run the word-language-model benchmark once on CPU.
    params_dict = WLM.default_params.copy()
    params_dict['cuda'] = False
    params = AttrDict(params_dict)
    benchmark = WLM()
    benchmark.prepare(params)
    benchmark.time(params)
예제 #10
0
파일: memnn.py 프로젝트: zou3519/benchmark
def run_memnn(warmup=2, benchmark=18, jit=False, cuda=False):
    """Benchmark the MemNN model on synthetic batches and return the Bench timer.

    Args:
        warmup: number of initial iterations excluded from timing stats.
        benchmark: number of timed iterations.
        jit: if True, trace the model with torch.jit before timing.
        cuda: if True, run with CUDA timing/device enabled.

    Returns:
        The populated Bench object.
    """
    nbatches = warmup + benchmark

    default_params = dict(lr=0.01,
                          embedding_size=128,
                          hops=3,
                          mem_size=100,
                          time_features=False,
                          position_encoding=True,
                          output='rank',
                          dropout=0.1,
                          optimizer='adam',
                          num_features=500,
                          num_batches=nbatches,
                          cuda=cuda)
    params = AttrDict(default_params)
    """Set up model."""
    # The CPU version is slow...
    # NOTE(review): both branches evaluate to 4, so this conditional is a
    # no-op; the comment above suggests a different CPU batch size was
    # intended — confirm before changing.
    params['batch_size'] = 4 if params.cuda else 4

    if params.cuda:
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')

    model = memnn.MemNN(params, params.num_features)
    criterion = nn.CrossEntropyLoss()
    # Synthetic inputs: only shapes/dtypes matter for timing, so the
    # contents are just zeros and ones.
    data_batches = [
        [  # memories, queries, memory_lengths, query_lengths
            torch.zeros(params.batch_size * params.mem_size,
                        dtype=torch.long,
                        device=device),
            torch.zeros(params.batch_size * 28,
                        dtype=torch.long,
                        device=device),
            torch.ones(params.batch_size,
                       params.mem_size,
                       dtype=torch.long,
                       device=device),
            torch.full((params.batch_size, ),
                       28,
                       dtype=torch.long,
                       device=device),
        ] for _ in range(params.num_batches)
    ]
    cand_batches = [
        torch.zeros(params.batch_size * 14,
                    params.embedding_size,
                    device=device) for _ in range(params.num_batches)
    ]
    target_batches = [
        torch.ones(params.batch_size, dtype=torch.long, device=device)
        for _ in range(params.num_batches)
    ]

    # model.to(device) # embeddings are performed on CPU
    # the memnn model takes care of things when it is passed the cuda flag
    criterion.to(device)
    """Time model."""
    cuda_tag = '_cuda' if cuda else ''
    jit_tag = '_jit' if jit else ''
    name = 'memnn{}{}'.format(cuda_tag, jit_tag)
    bench = Bench(name=name, cuda=cuda, warmup_iters=warmup)
    # Trace only on the first batch, then reuse the traced module.
    trace_once = jit

    total_loss = 0
    for data, cands, targets in zip(data_batches, cand_batches,
                                    target_batches):
        gc.collect()
        if trace_once:
            # NOTE(review): torch.jit.trace(*inputs)(model) is the legacy
            # tracing API; current torch uses torch.jit.trace(model, inputs).
            # Confirm against the torch version this benchmark pins.
            model = torch.jit.trace(*data)(model)
            trace_once = False
        with bench:
            output_embeddings = model(*data)
            scores = one_to_many(output_embeddings, cands)
            loss = criterion(scores, targets)
            loss.backward()
            total_loss += float(loss.item())

    return bench
예제 #11
0
def block_a_star(lddb, pathsdb, Map, start, goal, h):
	"""Block A* search. Implementation of Algorithm 2 from the paper:

	Yap, P., Burch, N., Holte, R. C., & Schaeffer, J. (2011, August). 
	Block A*: Database-driven search with applications in any-angle path-planning. 
	In Twenty-Fifth AAAI Conference on Artificial Intelligence.

	Args:
		lddb: local distance database
		pathsdb: local paths database
		Map: a BlockMap representing the map to be searched
		start: global address of the start node
		goal: global address of the goal node
		h: heuristic function

	Returns:
		A tuple of: a boolean value indicating if the goal was found 
		and a list of nodes on the path between start and goal.
		(goal_found, path)
	"""
	# a dict that will hold the state during the algorithm's run
	state = AttrDict({
		'Map': Map,
		'start': start,
		'goal': goal,

		'lddb': lddb,
		'pathsdb': pathsdb,
		# heuristic on a (block, local node) pair: convert to a global
		# node first, then measure against the goal
		'h': lambda block, node: h(to_global_node(block, node), goal),

		# g[block][local_node] = best known cost from the start node
		'g': defaultdict(dict),
		# marks local nodes whose g-value changed since the owning block
		# was last expanded (consumed by get_ingress_nodes)
		'g_changed': defaultdict(dict),
		# heapvalue[block] = priority the block currently holds in heap
		'heapvalue': {},
		'heap': PriorityQueue(),
		# parent[(block, node)] = predecessor (block, node) for path recovery
		'parent': {}
	})

	# *_block_node = local address within corresponding block
	start_block, start_block_node = Map.get_node_block(start)
	goal_block , goal_block_node  = Map.get_node_block(goal)

	init(state, start_block, start_block_node)
	init(state, goal_block, goal_block_node)

	# Seed the search: start node costs 0 and has no predecessor.
	state.g[start_block][start_block_node] = 0
	state.g_changed[start_block][start_block_node] = True
	state.parent[(start_block, start_block_node)] = (None, None)

	state.heap.push(start_block, 0)
	state.heapvalue[start_block] = 0

	# length = best goal-path length found so far; stop once no queued
	# block could still improve on it.
	length = np.inf
	while not state.heap.empty() and state.heapvalue[state.heap.top()[0]] < length:

		curr_block = state.heap.pop()
		ingress_nodes = get_ingress_nodes(state, curr_block)

		# Nothing changed in this block since its last expansion.
		if len(ingress_nodes) == 0:
			continue

		if curr_block == goal_block:
			# Candidate path lengths: cost to each ingress node plus the
			# precomputed local distance from it to the goal node.
			block_lddb = state.lddb[curr_block.idx]
			dists_to_goal = [
				state.g[curr_block][y] + block_lddb.get((y, goal_block_node), np.inf) for y in ingress_nodes
			]

			new_length = min(length, np.min(dists_to_goal))

			if new_length < length:
				length = new_length
				nearest_ingress_node = ingress_nodes[np.argmin(dists_to_goal)]
				# set parent of goal node, but avoid pointing to self
				if goal_block_node != nearest_ingress_node:
					state.parent[(goal_block, goal_block_node)] = (curr_block, nearest_ingress_node)

		expand_block(state, curr_block, ingress_nodes)

	if length < np.inf:
		return True, recover_path(state, goal_block, goal_block_node)
	else:
		return False, []