Example #1
    def _write(self, name, data):
        # Sample memory of the current process (-1) during the write and
        # append the peak usage to this submission's profile file.
        memory_usage(-1,
                     interval=MONITOR_CONFIGS["interval"]["write"],
                     timeout=MONITOR_CONFIGS["timeout"]["write"],
                     max_usage=True, timestamps=True,
                     stream=open(os.path.join(current_location,
                                              MONITOR_CONFIGS["memory_profile_store"],
                                              self.workflow_submission_id) + ".dat",
                                 "a+"),
                     description=("write", self.pe.id, self.pe.rank))
        super(MultiProcessingWrapper, self)._write(name, data)

        # self.pe.log('Writing %s to %s' % (data, name))
        try:
            targets = self.targets[name]
        except KeyError:
            # no targets
            if self.result_queue:
                self.result_queue.put((self.pe.id, name, data))
            return
        for (inputName, communication) in targets:
            output = {inputName: data}
            dest = communication.getDestination(output)
            for i in dest:
                # self.pe.log('Writing out %s' % output)
                try:
                    self.output_queues[i].put((output, STATUS_ACTIVE))
                except Exception:
                    self.pe.log("Failed to write item to output '%s'" % name)
Example #2
    def _read(self):
        # Sample memory of the current process (-1) before the read and
        # append the peak usage to this submission's profile file.
        memory_usage(-1,
                     interval=MONITOR_CONFIGS["interval"]["read"],
                     timeout=MONITOR_CONFIGS["timeout"]["read"],
                     max_usage=True, timestamps=True,
                     stream=open(os.path.join(current_location,
                                              MONITOR_CONFIGS["memory_profile_store"],
                                              self.workflow_submission_id) + ".dat",
                                 "a+"),
                     description=("read", self.pe.id, self.pe.rank))

        result = super(MultiProcessingWrapper, self)._read()

        if result is not None:
            return result
        # read from input queue
        no_data = True
        while no_data:
            try:
                data, status = self.input_queue.get()
                no_data = False
                # self.pe.log("data: %s, status: %s" %(data, status))
            except Exception:
                self.pe.log('Failed to read item from queue')

        while status == STATUS_TERMINATED:
            self.terminated += 1
            # self.pe.log("num_sources: %s, num_terminated: %s" % (self._num_sources, self.terminated))
            if self.terminated >= self._num_sources:
                return data, status
            else:
                try:
                    data, status = self.input_queue.get()
                    # self.pe.log("data: %s, status: %s" % (data, status))
                except Exception:
                    self.pe.log('Failed to read item from queue')

        return data, status
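
The second half of `_read` implements a termination protocol: every upstream source eventually posts one STATUS_TERMINATED message, and the reader only propagates termination once it has seen one from each source. Below is a minimal sketch of that counting logic, with `queue.Queue` and made-up status constants standing in for the multiprocessing queue and the real constants; note that in the wrapper the counter (`self.terminated`) persists across `_read` calls, while here it is local to keep the sketch self-contained.

    from queue import Queue

    STATUS_ACTIVE = 10        # hypothetical values, stand-ins for the real constants
    STATUS_TERMINATED = 20

    def read_until_done(input_queue, num_sources):
        # Keep reading until real data arrives, counting STATUS_TERMINATED
        # messages; termination is only returned once every source has sent one.
        terminated = 0
        while True:
            data, status = input_queue.get()
            if status != STATUS_TERMINATED:
                return data, status
            terminated += 1
            if terminated >= num_sources:
                return data, status

    q = Queue()
    q.put(({"input": 1}, STATUS_ACTIVE))
    q.put((None, STATUS_TERMINATED))
    q.put((None, STATUS_TERMINATED))

    print(read_until_done(q, num_sources=2))   # ({'input': 1}, 10)
    print(read_until_done(q, num_sources=2))   # (None, 20) once both sources end
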
Example #3
    def process(self):
        num_iterations = 0
        total_in_size = 0
        total_out_size = 0
        in_data_types = set()
        out_data_types = set()
        process_time = 0
        read_time = 0
        write_time = 0

        self.pe.preprocess()

        read_begin_time = time.time()

        result = self._read()

        read_end_time = time.time()
        read_time += read_end_time - read_begin_time

        inputs, status = result

        while status != STATUS_TERMINATED:
            if inputs is not None:
                if inputs:
                    total_in_size += sys.getsizeof(inputs)
                    # get data type of the first input value
                    in_type = type(next(iter(inputs.values()))).__name__
                    in_data_types.add(in_type)

                process_begin_time = time.time()

                try:
                    # Profile pe.process while it runs; retval=True makes this
                    # memory_usage variant hand back the PE's outputs.
                    outputs = memory_usage(
                        (self.pe.process, (inputs,)),
                        interval=MONITOR_CONFIGS["interval"]["process"],
                        timeout=MONITOR_CONFIGS["timeout"]["process"],
                        max_usage=True, timestamps=True,
                        retval=True,
                        stream=open(os.path.join(current_location,
                                                 MONITOR_CONFIGS["memory_profile_store"],
                                                 self.workflow_submission_id) + ".dat",
                                    "a+"),
                        description=("process", self.pe.id, self.pe.rank))
                except Exception as e:
                    # Fall back to an unprofiled call if profiling fails.
                    print(e)
                    outputs = self.pe.process(inputs)

                process_end_time = time.time()
                process_time += process_end_time - process_begin_time
                if outputs:
                    total_out_size += sys.getsizeof(outputs)
                    out_type = type(next(iter(outputs.values()))).__name__
                    out_data_types.add(out_type)

                num_iterations += 1
                if outputs is not None:
                    # self.pe.log('Produced output: %s' % outputs)
                    for key, value in outputs.items():
                        begin_write_time = time.time()

                        self._write(key, value)

                        end_write_time = time.time()
                        write_time += end_write_time - begin_write_time

            read_begin_time = time.time()

            inputs, status = self._read()

            read_end_time = time.time()
            read_time += read_end_time - read_begin_time
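
Stripped of the profiling, `process()` is a read/compute/write loop with wall-clock timers around each phase and running totals of payload sizes and observed value types; in the original, the compute step is additionally wrapped in `memory_usage(..., retval=True)` so each `pe.process` call's peak memory lands in the same profile file. Below is a minimal sketch of that bookkeeping, with hypothetical `read`, `compute` and `emit` callables standing in for `_read()`, `pe.process()` and `_write()`, and `None` standing in for STATUS_TERMINATED.

    import sys
    import time

    def run(read, compute, emit):
        stats = {"iterations": 0, "in_bytes": 0, "out_bytes": 0,
                 "read_s": 0.0, "process_s": 0.0, "write_s": 0.0,
                 "in_types": set(), "out_types": set()}

        t0 = time.time()
        inputs = read()
        stats["read_s"] += time.time() - t0

        while inputs is not None:
            stats["in_bytes"] += sys.getsizeof(inputs)
            stats["in_types"].add(type(next(iter(inputs.values()))).__name__)

            t0 = time.time()
            outputs = compute(inputs)
            stats["process_s"] += time.time() - t0
            stats["iterations"] += 1

            if outputs:
                stats["out_bytes"] += sys.getsizeof(outputs)
                stats["out_types"].add(type(next(iter(outputs.values()))).__name__)
                for key, value in outputs.items():
                    t0 = time.time()
                    emit(key, value)
                    stats["write_s"] += time.time() - t0

            t0 = time.time()
            inputs = read()
            stats["read_s"] += time.time() - t0

        return stats

    data = iter([{"in": "a"}, {"in": "bb"}, None])
    print(run(lambda: next(data),
              lambda d: {"out": d["in"].upper()},
              lambda k, v: None))
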