Example #1
    def drive_previous(self, drive):
        '''
        List previous processes when a drive is clicked in the Previous tab

            Parameters:
                drive (list): List containing the chosen drive
        '''
        # Return early if there is no record of previous processes
        if not os.path.exists(prevProcessfname):
            print_process(self.window, "No previous process...")
            return

        hist_list = load_previous_process(prevProcessfname, driveName=drive[0])
        self.window['-PREVDIRS-'].update(hist_list)
Example #2
    def set_destination(self):
        '''
        Process for setting destination folder
        '''
        if not self.chosenPath:  # Return if no path was chosen
            return

        # Return if chosenPath is a file by checking the last character
        if self.chosenPath[-1] not in ('/', ':'):
            print_process(self.window, "Destination cannot be a File")
            return

        self.window['-DES-'].update(self.chosenPath)
        self.rc.desPath = self.chosenPath  # Store value
Example #3
def work(q, example_paths, label_paths, total_files, export_path_example,
         export_path_label, size, layover, input_size):
    """
    The worker that produces the masks for a single pair of examples and labels

    :param q:
    :param example_paths:
    :param label_paths:
    :param total_files:
    :param export_path_example:
    :param export_path_label:
    :param size:
    :param layover:
    :param input_size:
    :return:
    """

    while not q.empty():
        try:
            i = q.get(False)
        except Empty:
            break

        # Show progress
        utils.print_process(total_files - q.qsize(), total_files)

        # We assume that related examples and labels have the same index in the path lists
        example_path = example_paths[i]
        label_path = label_paths[i]

        # Create the masks for both images of the pair
        mask_image(example_path, export_path_example, size, layover,
                   input_size)
        mask_image(label_path, export_path_label, size, layover, input_size)

        q.task_done()
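
A typical driver for this worker (not shown in the excerpt) fills a JoinableQueue with one index per example/label pair and starts several processes. The following is a minimal sketch under that assumption; it presumes `work`, `mask_image`, and `utils.print_process` are defined in the surrounding module, and the worker itself also relies on `from queue import Empty`.

from multiprocessing import JoinableQueue, Process, cpu_count

def run_workers(example_paths, label_paths, export_path_example,
                export_path_label, size, layover, input_size):
    # Hypothetical helper: enqueue one index per example/label pair
    q = JoinableQueue()
    for i in range(len(example_paths)):
        q.put(i)

    workers = [Process(target=work,
                       args=(q, example_paths, label_paths,
                             len(example_paths), export_path_example,
                             export_path_label, size, layover, input_size))
               for _ in range(cpu_count())]
    for w in workers:
        w.start()
    q.join()  # Wait until every queued index has been marked task_done()
    for w in workers:
        w.join()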
Example #4
    def rclone_process(self, cmd):
        '''
        Spawn a subprocess to interact with the command line

            Parameters:
                cmd (list): Command passed to Popen

            Returns:
                stdout (list): Formatted stdout from the subprocess
        '''
        logging.debug("rclone_process: Invoking: {}".format(cmd))
        self.process = subprocess.Popen(cmd,
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        universal_newlines=True)
        self.process.stdin.write(self.password)  # Write password
        self.process.stdin.close()  # close for safety

        stdout = ""
        start_time = time.monotonic()  # Get start time of the process
        indx = 0  # Color index
        print_process(self.window, "Processing...")
        for line in self.process.stdout:  # Get real-time output from the subprocess
            if self.window is not None and self.startProcess is False:
                if cmd[1] in process_list:  # Only run color progress if command in list
                    # get process elapsed time and print
                    fmtTime = time.strftime(
                        "%H:%M:%S", time.gmtime(time.monotonic() - start_time))
                    print_process(
                        self.window,
                        "Processing... Elapsed Time: {}".format(fmtTime))

                    # update color progress
                    try:
                        self.window['-VIEWPROCESS-'].update(
                            background_color=gradientColor[indx])
                        self.window.Refresh()
                    except Exception as e:
                        logging.debug("rclone_process: {}".format(e))
                        self.process.kill()  # Kill process if error happens
                        return
            stdout += line  # Record process stdout
            indx += 1
            if indx > len(gradientColor) - 1:  # Reset index to 0
                indx = 0
        self.window['-VIEWPROCESS-'].update(
            background_color="#000000")  # Reset to black when done
        print_process(self.window, "Done...")
        return self.rclone_format(stdout)
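
A minimal usage sketch, assuming an instance `rc` of the surrounding class with `window`, `password`, and `startProcess` already set. `rclone lsd remote:` is a real rclone command that lists directories on a remote; whether `lsd` appears in the project's `process_list` is an assumption, and "gdrive" is a placeholder remote name.

output = rc.rclone_process(["rclone", "lsd", "gdrive:"])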
Example #5
def start_sample(container_id, period, analyze_period, output_dir, gpu_id,
                 container_pid, duration_time):
    start_time = time.time()
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(output_dir + '/log_result.csv', 'w') as result_file:
        realtime_log = csv.writer(result_file)

        str_write_realtime = [
            'timestamp', 'cpu_usage(%)', 'mem_used(GiByte)',
            'mem_total(GiByte)', 'IO_read(KiByte/s)', 'IO_write(KiByte/s)',
            'network_receive(KiByte/s)', 'network_transmit(KiByte/s)'
        ]
        for i in range(len(gpu_id)):
            str_write_realtime.append('gpu_usage_' + str(gpu_id[i]))
            str_write_realtime.append('gpu_mem_usage_' + str(gpu_id[i]))
            str_write_realtime.append('gpu_mem_used_' + str(gpu_id[i]))
            str_write_realtime.append('gpu_mem_total_' + str(gpu_id[i]))
        realtime_log.writerow(str_write_realtime)

        sample_list = list()

        if int(container_pid) == -1:
            container_cpu_file = '/sys/fs/cgroup/cpuacct/cpuacct.stat'
            container_mem_file = '/sys/fs/cgroup/memory/memory.usage_in_bytes'
            container_blk_file = '/sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes'
            container_net_file = '/proc/net/dev'
        else:
            container_cpu_file = glob.glob('/sys/fs/cgroup/cpuacct/docker/' +
                                           str(container_id) +
                                           '*/cpuacct.stat')[0]
            container_mem_file = glob.glob('/sys/fs/cgroup/memory/docker/' +
                                           str(container_id) +
                                           '*/memory.usage_in_bytes')[0]
            container_blk_file = glob.glob(
                '/sys/fs/cgroup/blkio/docker/' + str(container_id) +
                '*/blkio.throttle.io_service_bytes')[0]
            container_net_file = '/proc/' + str(container_pid) + '/net/dev'

        adviser = Adviser()
        sample_datas = list()
        stop_flag = False
        last_time = time.time()
        while not (os.path.exists("./stop.flag") or stop_flag):
            sample_data = get_sample_data(container_cpu_file,
                                          container_mem_file,
                                          container_blk_file,
                                          container_net_file, gpu_id, period)

            str_write_realtime = sample_data.get_array()
            str_write_realtime.insert(0, time.time() - start_time)
            sample_list.append(str_write_realtime)
            sample_datas.append(str_write_realtime)
            realtime_log.writerow(str_write_realtime)

            if time.time() - last_time >= analyze_period:
                last_time = time.time()
                adviser.detect_pattern(sample_list)
                sample_list = list()
                if duration_time != -1:
                    print_process(
                        (time.time() - start_time) / (duration_time * 60))
                    stop_flag = time.time() - start_time > duration_time * 60
        print_process(1)
        adviser.get_advise()
    sample_datas = pd.read_csv(output_dir + '/log_result.csv').values
    analyze_value(sample_datas, period, gpu_id)
    draw_graph(sample_datas, output_dir, period, gpu_id)
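
`get_sample_data` is not shown in the excerpt. As a sketch of the underlying technique, the cgroup-v1 `cpuacct.stat` file polled above can be read twice to derive a CPU-usage percentage; the helper names below are illustrative, not the project's.

import os
import time

CLK_TCK = os.sysconf('SC_CLK_TCK')  # cpuacct.stat reports ticks in USER_HZ units

def read_cpu_ticks(cpuacct_stat_path):
    # cpuacct.stat holds two lines, "user <ticks>" and "system <ticks>"
    with open(cpuacct_stat_path) as f:
        return sum(int(line.split()[1]) for line in f)

def sample_cpu_usage(cpuacct_stat_path, period):
    # Percentage of one CPU consumed by the cgroup over `period` seconds
    before = read_cpu_ticks(cpuacct_stat_path)
    time.sleep(period)
    after = read_cpu_ticks(cpuacct_stat_path)
    return (after - before) / CLK_TCK / period * 100.0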
Example #6
File: profiler.py Project: wutb15/pai
def start_sample(container_id, period, analyze_period, duration, output_dir,
                 gpu_id, container_pid):
    start_time = time.time()
    if not os.path.exists('./' + output_dir):
        os.mkdir(output_dir)
    # NOTE: the file handle stays open for the sampler's lifetime
    realtime_log = csv.writer(open('./' + output_dir + '/log_result.csv', 'w'))

    str_write_realtime = [
        'cpu_usage', 'mem_used', 'mem_total', 'IO_read', 'IO_write',
        'network_receive', 'network_transmit'
    ]
    for i in range(len(gpu_id)):
        str_write_realtime.append('gpu_usage_' + str(gpu_id[i]))
        str_write_realtime.append('gpu_mem_usage_' + str(gpu_id[i]))
        str_write_realtime.append('gpu_mem_used_' + str(gpu_id[i]))
        str_write_realtime.append('gpu_mem_total_' + str(gpu_id[i]))
    realtime_log.writerow(str_write_realtime)

    nv.nvmlInit()
    sample_list = list()

    if int(container_pid) == -1:
        container_cpu_file = '/sys/fs/cgroup/cpuacct/cpuacct.stat'
        container_mem_file = '/sys/fs/cgroup/memory/memory.usage_in_bytes'
        container_blk_file = '/sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes'
        container_net_file = '/proc/net/dev'
    else:
        container_cpu_file = glob.glob('/sys/fs/cgroup/cpuacct/docker/' +
                                       str(container_id) + '*/cpuacct.stat')[0]
        container_mem_file = glob.glob('/sys/fs/cgroup/memory/docker/' +
                                       str(container_id) +
                                       '*/memory.usage_in_bytes')[0]
        container_blk_file = glob.glob('/sys/fs/cgroup/blkio/docker/' +
                                       str(container_id) +
                                       '*/blkio.throttle.io_service_bytes')[0]
        container_net_file = '/proc/' + str(container_pid) + '/net/dev'

    adviser = Adviser()

    while time.time() - start_time < duration * 60:
        sample_data = get_sample_data(container_cpu_file, container_mem_file,
                                      container_blk_file, container_net_file,
                                      gpu_id, period)

        sample_list.append(sample_data.get_array())

        str_write_realtime = [
            sample_data.get_cpu_usage(),
            sample_data.get_mem_used(),
            sample_data.get_mem_total(),
            sample_data.get_read_bytes(),
            sample_data.get_write_bytes(),
            sample_data.get_network_receive(),
            sample_data.get_network_transmit()
        ]
        # the real-time file will log the information of all the GPUs that the model use
        for i in range(len(gpu_id)):
            str_write_realtime.append(sample_data.get_gpu_usage()[i])
            str_write_realtime.append(sample_data.get_gpu_mem()[i])
            str_write_realtime.append(sample_data.get_gpu_mem_used()[i])
            str_write_realtime.append(sample_data.get_gpu_mem_total()[i])
        realtime_log.writerow(str_write_realtime)

        if len(sample_list) > analyze_period / period:
            analyze_samples(sample_list, adviser)
            sample_list = list()
            print_process((time.time() - start_time) / (duration * 60))
    print_process(1)
    analyze_result = adviser.get_advise()
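
`print_process` is the helper these profiler examples share, called here with a completion fraction in [0, 1]; its body is never shown. Below is a minimal sketch of a console progress printer with that calling convention, purely an assumption (other examples on this page call same-named helpers with different signatures).

import sys

def print_process(fraction, width=50):
    # Render a one-line progress bar for a completion fraction in [0, 1]
    fraction = min(max(fraction, 0.0), 1.0)
    filled = int(width * fraction)
    sys.stdout.write('\r[%s%s] %5.1f%%' %
                     ('#' * filled, '-' * (width - filled), fraction * 100))
    sys.stdout.flush()
    if fraction >= 1.0:
        sys.stdout.write('\n')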
Example #7
def main():

    with open('../data/ex_train_list.pkl', 'rb') as f1:
        train_list = pickle.load(f1)
    with open('../data/ex_landmark.pkl', 'rb') as f2:
        landmark = pickle.load(f2)

    print(len(train_list))

    print('Loading data...')
    x_train, label, shape = load_data(train_list, landmark)
    x_train = np.asarray(x_train)
    shape = np.asarray(shape)
    label = np.asarray(label)

    (im_train, lm_train, gt_train), (x_val, lm_val,
                                     gt_val) = split_data(x_train,
                                                          shape,
                                                          label,
                                                          split_ratio=0.1)

    img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
    lm_ph = tf.placeholder(tf.float32, [None, 51 * 2])
    label_ph = tf.placeholder(tf.float32, [None, 8])
    keep_prob = tf.placeholder(tf.float32)
    lr_ph = tf.placeholder(tf.float32)

    with tf.Session() as sess:

        dan = vgg_face.Vgg_face()
        dan.build(img_ph, keep_prob)
        dgn = vgg_face.DGN()
        dgn.build(lm_ph, keep_prob)

        with tf.name_scope('dan'):
            dan_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=dan.fc8, labels=label_ph)
            dan_loss = tf.reduce_mean(dan_cross_entropy)

        with tf.name_scope('dgn'):
            dgn_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=dgn.fc3, labels=label_ph)
            dgn_loss = tf.reduce_mean(dgn_cross_entropy)

        with tf.name_scope('dagn'):
            dagn_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=dan.fc8 + dgn.fc3, labels=label_ph)
            dagn_loss = tf.reduce_mean(dagn_cross_entropy)

        with tf.name_scope('loss'):
            loss = dan_loss + dgn_loss + 0.1 * dagn_loss
            train_step = tf.train.AdamOptimizer(lr_ph).minimize(loss)

        with tf.name_scope('acc'):
            pred = tf.nn.softmax(dan.fc8 + dgn.fc3)
            correct_prediction = tf.equal(tf.argmax(pred, 1),
                                          tf.argmax(label_ph, 1))
            accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        best_acc = 0.0
        lr = 1e-4
        best_loss = 1000
        for i in range(epoch):

            if i % 50 == 0 and i != 0:
                lr = 1e-4
                print('\nlearning rate has reset to', lr)
            lr = 0.98 * lr

            cnt = 0
            for im, lm, gt in gen_batch(im_train, lm_train, gt_train,
                                        batch_size):

                tStart = time.time()
                sess.run(train_step,
                         feed_dict={
                             img_ph: im,
                             lm_ph: lm,
                             label_ph: gt,
                             keep_prob: 1.0,
                             lr_ph: lr
                         })
                tEnd = time.time()
                print_process(cnt, im_train.shape[0] // batch_size,
                              tEnd - tStart)
                if cnt == im_train.shape[0] // batch_size:
                    break
                cnt += 1

            train_acc = 0.0
            train_loss = 0.0
            for im, lm, gt in gen_batch(im_train, lm_train, gt_train,
                                        batch_size):

                acc, l = sess.run((accuracy, loss),
                                  feed_dict={
                                      img_ph: im,
                                      lm_ph: lm,
                                      label_ph: gt,
                                      keep_prob: 1.0
                                  })
                train_acc += acc
                train_loss += l

            val_acc = 0.0
            val_loss = 0.0
            for im, lm, gt in gen_batch(x_val, lm_val, gt_val, batch_size):

                acc, l = sess.run((accuracy, loss),
                                  feed_dict={
                                      img_ph: im,
                                      lm_ph: lm,
                                      label_ph: gt,
                                      keep_prob: 1.0
                                  })
                val_acc += acc
                val_loss += l

            if (best_acc == val_acc / x_val.shape[0] and best_loss > val_loss) \
                    or best_acc < val_acc / x_val.shape[0]:

                print("Epoch: %d, training accuracy %.4f, loss: %.4f, val_acc: %.4f, val_loss: %.4f     val improve from %.4f to %.4f, save model." \
                    %(i+1, train_acc/im_train.shape[0], train_loss, val_acc/x_val.shape[0], val_loss, best_acc, val_acc/x_val.shape[0]))
                best_acc = val_acc / x_val.shape[0]
                best_loss = val_loss
                saver.save(sess, '../model/dgan.ckpt')

            else:

                print("Epoch: %d, training accuracy %.4f, loss: %.4f, val_acc: %.4f, val_loss: %.4f     val_acc doesn't improve." \
                 %(i+1, train_acc/im_train.shape[0], train_loss, val_acc/x_val.shape[0], val_loss))