Example #1
    def __init__(self, workload_file):
        self.workload_file = workload_file

        # avg_used_slots = load * SLOTS_PER_WORKER * TOTAL_WORKERS
        # self.interarrival_delay = (1.0 * MEDIAN_TASK_DURATION * TASKS_PER_JOB / avg_used_slots)
        # print("Interarrival delay: %s (avg slots in use: %s)" %
        #       (self.interarrival_delay, avg_used_slots))

        self.tasks = defaultdict()  # no default_factory given, so this behaves like a plain dict
        self.task_arrival = defaultdict(list)
        # self.num_jobs

        # Pending simulation events; a PriorityQueue pops the smallest item first.
        self.event_queue = Queue.PriorityQueue()
        self.VMs = defaultdict(lambda: np.ndarray(0))
        self.completed_VMs = defaultdict(list)
        self.lambdas = defaultdict()
        # self.file_prefix = file_prefix

        # Spin up the initial pool: INITIAL_WORKERS VMs for each of the three task types.
        # Note: setdefault stores plain lists, so the ndarray default_factory above is bypassed.
        for _ in range(INITIAL_WORKERS):
            for task_type in range(3):
                self.VMs.setdefault(task_type, []).append(
                    VM(self, 0, start_up_delay, task_type, 4, 8192, 0.10, False,
                       len(self.VMs[task_type])))
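The constructor above only builds state; in an event-driven simulator shaped like this, the PriorityQueue is normally drained in timestamp order by a run loop. A minimal sketch of such a loop follows; the (time, event) tuple layout and the event.run() method are assumptions, not code from the original project.

    # Hypothetical run loop; the (time, event) tuples and event.run() are assumed, not original code.
    def run(self):
        while not self.event_queue.empty():
            current_time, event = self.event_queue.get()   # earliest timestamp first
            new_events = event.run(current_time) or []
            for new_event in new_events:
                self.event_queue.put(new_event)            # (time, event) keeps the queue ordered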
Example #2
    def __init__(
            self,
            simulation,
            current_time,
            up_time,
            task_type,
            vcpu,
            vmem,
            price,
            spin_up,
            id):
        self.simulation = simulation
        self.start_time = current_time
        self.up_time = up_time
        self.end_time = current_time
        self.vcpu = vcpu
        self.vmem = vmem
        self.queued_tasks = Queue.PriorityQueue()
        self.id = id
        self.isIdle = True
        self.lastIdleTime = current_time
        self.price = price
        self.task_type = task_type
        self.spin_up = spin_up
        # print("adding worker id and type", self.id, self.task_type)
        # Slot capacity depends on the task type this VM serves.
        if task_type == 0:
            self.free_slots = 6
            self.max_slots = 6
        elif task_type == 1:
            self.free_slots = 5
            self.max_slots = 5
        elif task_type == 2:
            self.free_slots = 2
            self.max_slots = 2
        self.num_queued_tasks = 0
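For orientation, this signature lines up with the VM(...) call in Example #1; a single worker of task type 0 would be created roughly like this (simulation and start_up_delay are stand-ins for objects defined elsewhere).

# Hypothetical call mirroring Example #1; simulation and start_up_delay come from elsewhere.
worker = VM(simulation, 0, start_up_delay, 0, 4, 8192, 0.10, False, 0)
# task_type=0 gives the worker 6 slots (free_slots == max_slots == 6).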
Example #3
from multiprocessing import Process
import multiprocessing as mp
from multiprocessing.managers import BaseManager
import Queue  # stdlib Queue module (Python 2); multiprocessing queues are created via mp.Queue(),
              # so importing multiprocessing.Queue here would only be shadowed by this module
import config

conf = config.config('config')
server = conf.get_value('server')
port = int(conf.get_value('port'))
authkey = conf.get_value('authkey')
sched_buffer = int(conf.get_value('sched_buffer'))
sched_cycle = float(conf.get_value('sched_cycle'))

queue_in = mp.Queue()

queue_buffer = Queue.PriorityQueue(sched_buffer)  # bounded: at most sched_buffer queued entries
queue_queue = Queue.PriorityQueue()

queue_del = mp.Queue()
queue_mess = mp.Queue()

res = {}
jobid = {
    'jobid': 1
}
joblist = {}


class QueueManager(BaseManager):
    pass
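The snippet stops at an empty QueueManager subclass. With multiprocessing.managers, the usual next step is to register callables that hand out the shared queues and then serve the manager on the configured address. A minimal sketch of that pattern follows; the typeid names get_in and get_buffer are illustrative, not taken from the original project.

# Hypothetical continuation: expose the queues through the manager (typeids are made up).
QueueManager.register('get_in', callable=lambda: queue_in)
QueueManager.register('get_buffer', callable=lambda: queue_buffer)

manager = QueueManager(address=(server, port), authkey=authkey)
manager_server = manager.get_server()
manager_server.serve_forever()  # clients connect with the same address and authkey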
Example #4
def beam_decode(target_tensor, decoder_hiddens, decoder, encoder_outputs=None):
    # Assumes torch, operator and Queue are imported, and that SOS_token, EOS_token,
    # device and BeamSearchNode are defined elsewhere in the module.
    beam_width = 10
    topk = 1  # number of decoded hypotheses to return per input sequence
    decoded_batch = []

    for idx in range(target_tensor.size(0)):
        if isinstance(decoder_hiddens, tuple):  # LSTM case
            decoder_hidden = (decoder_hiddens[0][:, idx, :].unsqueeze(0),
                              decoder_hiddens[1][:, idx, :].unsqueeze(0))
        else:
            decoder_hidden = decoder_hiddens[:, idx, :].unsqueeze(0)
        encoder_output = encoder_outputs[:, idx, :].unsqueeze(1)
        decoder_input = torch.tensor([[SOS_token]], dtype=torch.long, device=device)
        endnodes = []  # finished hypotheses (ones that reached EOS)
        number_required = min((topk + 1), topk - len(endnodes))  # equals topk here

        node = BeamSearchNode(decoder_hidden, None, decoder_input, 0, 1)  # root node (SOS)
        nodes = Queue.PriorityQueue()
        nodes.put((-node.eval(), node))  # negate so the highest-scoring node pops first
        qsize = 1
        while True:
            if qsize > 2000: break  # safety cap on the number of expanded nodes
            score, n = nodes.get()  # best-scoring live hypothesis
            decoder_input = n.wordid
            decoder_hidden = n.h

            if n.wordid.item() == EOS_token and n.prevNode is not None:
                endnodes.append((score, n))
                if len(endnodes) >= number_required:
                    break
                else:
                    continue

            decoder_output, decoder_hidden = decoder(decoder_input,
                                                     decoder_hidden,
                                                     encoder_output)

            # Expand this hypothesis with its beam_width most likely next tokens.
            log_prob, indexes = torch.topk(decoder_output, beam_width)
            nextnodes = []

            for new_k in range(beam_width):
                decoded_t = indexes[0][new_k].view(1, -1)
                log_p = log_prob[0][new_k].item()

                node = BeamSearchNode(decoder_hidden, n, decoded_t,
                                      n.logp + log_p, n.leng + 1)
                score = -node.eval()
                nextnodes.append((score, node))

            for i in range(len(nextnodes)):
                score, nn = nextnodes[i]
                nodes.put((score, nn))
            qsize += len(nextnodes) - 1

        # If nothing reached EOS, fall back to the best partial hypotheses.
        if len(endnodes) == 0:
            endnodes = [nodes.get() for _ in range(topk)]

        utterances = []
        for score, n in sorted(endnodes, key=operator.itemgetter(0)):
            utterance = []
            utterance.append(n.wordid)
            while n.prevNode is not None:  # backtrack through parent links to the root
                n = n.prevNode
                utterance.append(n.wordid)

            utterance = utterance[::-1]
            utterances.append(utterance)

        decoded_batch.append(utterances)

    return decoded_batch
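Example #4 leans on a BeamSearchNode helper that this page does not show. A minimal sketch consistent with the attributes used above (h, prevNode, wordid, logp, leng and an eval() score) might look like the following; it is an assumption, not the original implementation.

class BeamSearchNode(object):
    # Hypothetical helper matching the attribute names used in Example #4.
    def __init__(self, hidden_state, previous_node, word_id, log_prob, length):
        self.h = hidden_state          # decoder hidden state at this node
        self.prevNode = previous_node  # parent node, or None for the root
        self.wordid = word_id          # token tensor chosen at this step
        self.logp = log_prob           # cumulative log-probability
        self.leng = length             # number of tokens generated so far

    def eval(self):
        # Length-normalised log-probability; the original may add its own length penalty.
        return self.logp / float(self.leng)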