Example 1
    def preprocess_model_server(self, data, data_type):
        if data_type == 'URL' or data_type == 'PATH':
            data_path = os.path.normpath(data)
            data_name = data_path.split('/')[-1]
            ext_name = data_name.split('.')[-1].lower()

            if ext_name in ['jpg', 'jpeg', 'png', 'bmp', 'gif']:
                image_data = Image.open(data_path)
                img_data = np.frombuffer(image_data.tobytes(), dtype=np.uint8)
                img_data = img_data.reshape(
                    (image_data.size[1], image_data.size[0],
                     len(image_data.getbands())))
                return img_data, data_name, 'IMAGE'
            elif ext_name in ['mp4', 'avi']:
                reader = imageio.get_reader(data_path)
                image_list = []
                for im in reader:
                    img_data = np.frombuffer(im.tobytes(), dtype=np.uint8)
                    img_data = img_data.reshape(
                        (im.shape[0], im.shape[1], im.shape[2]))
                    image_list.append(np.expand_dims(img_data, 0))

                image_volume = np.vstack(image_list)
                return image_volume, data_name, 'VIDEO'
            elif ext_name in ['txt']:
                with open(data_path, 'r') as fp:
                    content = fp.read()
                    return content, data_name, 'FILE'
            else:
                # TODO: support audio and other formats
                logger.warning('unsupported file type %s' % ext_name)
                return None, None, None
        else:
            return data, str(uuid.uuid4()), 'STRING'
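
Note: for 8-bit multi-band images, the tobytes/frombuffer/reshape sequence above is equivalent to NumPy's direct conversion of a PIL image. A minimal sketch of the shorter form (with the caveat that single-band images come back as (H, W) rather than (H, W, 1)):

import numpy as np
from PIL import Image

def load_image_array(path):
    # np.asarray converts a PIL image straight into an (H, W, bands)
    # uint8 array for 8-bit modes such as RGB/RGBA
    return np.asarray(Image.open(path))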
Example 2
  def random(self, count=1):
    # get study configuration
    study_configuration = json.loads(self.study.study_configuration)

    try_count = 0
    proposed_search_space = []
    while True:
      if try_count > 50:
        logger.warning("couldn't find a valid graph structure for study %s" % self.study.name)
        return [(None, None)]

      random_p = random.choice(list(range(self.population_size)))
      default_graph_str = study_configuration['searchSpace']['current_population'][random_p]
      default_graph = Decoder().decode(default_graph_str)
      default_graph.layer_factory = BaseLayerFactory()
      default_graph_info = study_configuration['searchSpace']['current_population_info'][random_p]

      # step 1: randomly mutate one cell
      graph, graph_info = self.mutation_operator._mutate_for_cell(copy.deepcopy(default_graph),
                                                                  copy.deepcopy(default_graph_info))

      # step 2: mutate a branch
      graph, graph_info = self.mutation_operator._mutate_for_branch(graph, graph_info)

      # step 3: randomly add one skip connection between cells
      if random.random() < self.mutation_operator._mutate_rate_for_skip_cell:
        for start_layer_id, end_layer_id in self.mutation_operator._find_allowed_skip_cell(graph_info):
          try:
            graph = self.mutation_operator._mutate_for_skip_cell(graph, start_layer_id, end_layer_id)
            break
          except Exception:
            # this mutation may be invalid for the sampled pair; try the next one
            pass

      # step 4: randomly add skip connections between branches in a cell
      for _ in range(self.branch_num - 1):
        for start_layer_id, end_layer_id in self.mutation_operator._find_allowed_skip_branch(graph_info):
          try:
            graph = self.mutation_operator._mutate_for_skip_branch(graph, start_layer_id, end_layer_id)
            break
          except Exception:
            # this mutation may be invalid for the sampled pair; try the next one
            pass

      # graph.visualization('%s.png' % (str(uuid.uuid4())))
      graph_dna = self.dna(graph, graph_info)
      trials = Trial.filter(study_name=self.study.name)
      is_not_valid = False
      for t in trials:
        if str(t.structure_encoder) == str(graph_dna.tolist()):
          is_not_valid = True
          break

      if is_not_valid:
        try_count += 1
        continue

      proposed_search_space.append((graph_dna, Encoder(skipkeys=True).encode(graph), graph_info))
      if len(proposed_search_space) == count:
        break

    return proposed_search_space
Example 3
    def __vcg_try_get(A, page_url, try_times=5):
        while try_times != 0:
            Result = A.get(page_url, timeout=7, allow_redirects=True)
            if Result.status_code == 200:
                return Result

            logger.warn("sleep 10s, continue try.")
            time.sleep(10)
            try_times -= 1

        return None
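
A hedged usage sketch of the retry helper; the session setup and URL are illustrative, mirroring how Example 7 calls it:

import requests

session = requests.Session()
result = __vcg_try_get(session, 'https://www.vcg.com/creative-image/cat?page=2')
if result is None:
    # all retries failed; the caller should give up on this page
    pass
else:
    html = result.text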
Example 4
  def dispatch_prepare_data(self, data, data_type):
    data_path = None
    data_name = None
    if data_type == 'URL':
      # download data
      download_path = os.path.join(self.api_static_path, 'input')
      if not os.path.exists(download_path):
        os.makedirs(download_path)

      data_name = os.path.normpath(data).split('/')[-1]
      data_name = '%s_%s' % (str(uuid.uuid4()), data_name)
      data_path = download(data, os.path.join(self.api_static_path, 'input'), data_name)
      data_path = os.path.normpath(data_path)
    elif data_type == 'PATH':
      data_name = data.split('/')[-1]
      if os.path.exists(os.path.join(self.api_static_path, 'input', data_name)):
        data_path = os.path.join(self.api_static_path, 'input', data_name)

    if data_path is not None:
      ext_name = data_path.split('/')[-1].split('.')[-1].lower()
      if ext_name in ['jpg', 'jpeg', 'png', 'bmp']:
        image_data = Image.open(data_path)
        img_data = np.frombuffer(image_data.tobytes(), dtype=np.uint8)
        img_data = img_data.reshape((image_data.size[1], image_data.size[0], len(image_data.getbands())))
        fsize = os.path.getsize(data_path)
        fsize = fsize / float(1024 * 1024)
        return img_data, data_name, 'IMAGE', round(fsize, 2), {}
      elif ext_name in ['mp4']:
        # TODO: bug
        reader = imageio.get_reader(data_path)
        image_list = []
        for im in reader:
          img_data = np.frombuffer(im.tobytes(), dtype=np.uint8)
          img_data = img_data.reshape((im.shape[0], im.shape[1], im.shape[2]))
          image_list.append(np.expand_dims(img_data, 0))

        image_volume = np.vstack(image_list)

        fsize = os.path.getsize(data_path)
        fsize = fsize / float(1024 * 1024)
        return image_volume, data_name, 'VIDEO', round(fsize,2), {'FPS': reader.get_meta_data()['fps']}
      else:
        # TODO: support audio and other formats
        logger.warning('unsupported file type %s' % ext_name)

    return None, None, None, 0, {}
Example 5
def _pick_idle_port(from_port=40000, check_count=100):
  check_port = from_port
  while check_count:
    if not _is_open('127.0.0.1', check_port):
      break

    logger.warning('port %d is occupied, trying port %d' % (check_port, check_port + 1))

    check_port += 1
    check_count -= 1

    if check_count == 0:
      check_port = None

  if check_port is None:
    logger.warning("couldn't find a free port")
    exit(-1)

  return check_port
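
_is_open is not shown in this example. A plausible implementation (an assumption, not the project's actual helper) probes the port with a short-lived TCP connection:

import socket

def _is_open(host, port):
    # True if something is already listening on (host, port)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(0.5)
        return s.connect_ex((host, port)) == 0

With this in place, _pick_idle_port(40000) scans ports 40000 upward and returns the first free one.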
Example 6
    def __vcg_find_and_download(waiting_process_queue, save_dir):
        A = requests.Session()
        A.headers = headers
        query_url = 'https://www.vcg.com/creative-image/{}'.format(keyword)
        if download_data_type == 'video':
            query_url = 'https://www.vcg.com/creative-video-search/{}'.format(
                keyword)

        download_count = 0
        content = None
        nav_page_list = []
        Result = A.get(query_url, timeout=7, allow_redirects=True)
        if Result.status_code != 200:
            logger.error("%s couldnt connect." % query_url)
            return

        content = Result.text
        soup = BeautifulSoup(content, 'html.parser')
        # step 1: collect the nav pages to crawl from the pagination links
        if download_data_type == 'image':
            pages = soup.findAll("a", class_="paginationClickBox")
            last_page = pages[-1]

            total_page = soup.findAll("span", class_="paginationTotal")
            try:
                total_page_num = int(last_page.text)
                if len(total_page) > 0:
                    total_page_num = int(total_page[0].text[1:-1])
            except ValueError:
                logger.error("failed to parse nav pages, defaulting to 2.")
                total_page_num = 2

            page_nav_url = last_page.get('href').split('?')[0]
            for i in range(2, total_page_num + 1):
                nav_page_list.append('%s?page=%d' % (page_nav_url, i))
        else:
            pages = soup.findAll("a", class_="_2IlL4")
            last_page = pages[-2]
            try:
                total_page_num = int(last_page.text)
            except ValueError:
                logger.error("failed to parse nav pages, defaulting to 2.")
                total_page_num = 2

            page_nav_url = last_page.get('href').split('?')[0]
            for i in range(2, total_page_num + 1):
                nav_page_list.append('%s?page=%d' % (page_nav_url, i))

        # step 2: extract resources from the first result page
        logger.info("Analyze nav page(%d/%d) %s" %
                    (1, len(nav_page_list) + 1, query_url))
        img_url_list = []
        if download_data_type == 'image':
            # parse image resources
            img_list = soup.findAll("img", class_="lazyload_hk")
            img_url_list = [
                "http://{}".format(p.get("data-src")[2:]) for p in img_list
            ]
        else:
            # parse video resources
            img_list = soup.findAll('source', type="image/webp")
            img_url_list = []
            video_name_list = []
            for p in img_list:
                kk = p.get('data-srcset')[2:].split('/')[3].split(
                    '?')[0].split('_')[0]
                if kk not in video_name_list:
                    video_name_list.append(kk)
                    img_url_list.append(
                        'http://gossv.cfp.cn/videos/mts_videos/medium/temp/{}.mp4'
                        .format(kk))

        for img_url in img_url_list:
            __vcg_img_download(waiting_process_queue, save_dir, img_url,
                               keyword, download_count)
            download_count += 1

        # step 3: process the remaining nav pages
        for page_index, page_url in enumerate(nav_page_list):
            logger.info("Analyze nav page(%d/%d) %s" %
                        (page_index + 2, len(nav_page_list) + 1, page_url))
            Result = A.get(page_url, timeout=7, allow_redirects=True)
            if Result.status_code != 200:
                logger.warn("Couldnt connect and analyze %s. (page %s)" %
                            ((Result.text, page_url)))
                Result = __vcg_try_get(A, page_url, 5)
                if Result is None:
                    logger.warn("Couldnt connect %s, return." % page_url)
                    return

            content = Result.text
            soup = BeautifulSoup(content, 'html.parser')

            img_url_list = []
            if download_data_type == 'image':
                # parse image resources
                img_list = soup.findAll("img", class_="lazyload_hk")
                img_url_list = [
                    "http://{}".format(p.get("data-src")[2:]) for p in img_list
                ]
            else:
                # parse video resources
                img_list = soup.findAll('source', type="image/webp")
                img_url_list = []
                video_name_list = []
                for p in img_list:
                    kk = p.get('data-srcset')[2:].split('/')[3].split(
                        '?')[0].split('_')[0]
                    if kk not in video_name_list:
                        video_name_list.append(kk)
                        img_url_list.append(
                            'http://gossv.cfp.cn/videos/mts_videos/medium/temp/{}.mp4'
                            .format(kk))

            logger.info("Finding %d data" % len(img_url_list))
            for img_url in img_url_list:
                __vcg_img_download(waiting_process_queue, save_dir, img_url,
                                   keyword, download_count)
                download_count += 1

        # put the end-of-stream marker
        if waiting_process_queue is not None:
            waiting_process_queue.put(None)
Example 7
def vcg_download(keyword,
                 download_params,
                 download_save_dir,
                 process_queue=None):
    # VCG (Visual China Group) stock site
    headers = {
        'Accept-Language':
        'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        'Upgrade-Insecure-Requests': '1'
    }
    download_data_type = 'image'
    if download_params is not None:
        download_data_type = download_params.get('download_data_type', 'image')
        if download_data_type not in ['image', 'video']:
            logger.warning("only download_data_type 'image' or 'video' is supported")
            download_data_type = 'image'

    def __vcg_img_download(waiting_process_queue, save_dir, img_url, keyword,
                           count):
        try:
            logger.info("Downloading(%d) %s." % (count + 1, img_url))
            pic = requests.get(img_url, timeout=7)
        except requests.RequestException:
            logger.error("couldn't download %s." % img_url)
            return
        else:
            file_prefix = 'VCG_' + keyword + '_' + str(count)
            file_name = file_prefix + '.jpg' if download_data_type == 'image' else file_prefix + '.mp4'
            file_path = os.path.join(save_dir, file_name)
            with open(file_path, 'wb') as fp:
                fp.write(pic.content)
            logger.info("Finish download %s ." % img_url)

            if waiting_process_queue is not None:
                waiting_process_queue.put(file_path)

    def __vcg_try_get(A, page_url, try_times=5):
        while try_times != 0:
            Result = A.get(page_url, timeout=7, allow_redirects=True)
            if Result.status_code == 200:
                return Result

            logger.warn("sleep 10s, continue try.")
            time.sleep(10)
            try_times -= 1

        return None

    def __vcg_find_and_download(waiting_process_queue, save_dir):
        A = requests.Session()
        A.headers = headers
        query_url = 'https://www.vcg.com/creative-image/{}'.format(keyword)
        if download_data_type == 'video':
            query_url = 'https://www.vcg.com/creative-video-search/{}'.format(
                keyword)

        download_count = 0
        content = None
        nav_page_list = []
        Result = A.get(query_url, timeout=7, allow_redirects=True)
        if Result.status_code != 200:
            logger.error("%s couldnt connect." % query_url)
            return

        content = Result.text
        soup = BeautifulSoup(content, 'html.parser')
        # step 1: collect the nav pages to crawl from the pagination links
        if download_data_type == 'image':
            pages = soup.findAll("a", class_="paginationClickBox")
            last_page = pages[-1]

            total_page = soup.findAll("span", class_="paginationTotal")
            try:
                total_page_num = int(last_page.text)
                if len(total_page) > 0:
                    total_page_num = int(total_page[0].text[1:-1])
            except ValueError:
                logger.error("failed to parse nav pages, defaulting to 2.")
                total_page_num = 2

            page_nav_url = last_page.get('href').split('?')[0]
            for i in range(2, total_page_num + 1):
                nav_page_list.append('%s?page=%d' % (page_nav_url, i))
        else:
            pages = soup.findAll("a", class_="_2IlL4")
            last_page = pages[-2]
            try:
                total_page_num = int(last_page.text)
            except ValueError:
                logger.error("failed to parse nav pages, defaulting to 2.")
                total_page_num = 2

            page_nav_url = last_page.get('href').split('?')[0]
            for i in range(2, total_page_num + 1):
                nav_page_list.append('%s?page=%d' % (page_nav_url, i))

        # step 2: extract resources from the first result page
        logger.info("Analyze nav page(%d/%d) %s" %
                    (1, len(nav_page_list) + 1, query_url))
        img_url_list = []
        if download_data_type == 'image':
            # parse image resources
            img_list = soup.findAll("img", class_="lazyload_hk")
            img_url_list = [
                "http://{}".format(p.get("data-src")[2:]) for p in img_list
            ]
        else:
            # parse video resources
            img_list = soup.findAll('source', type="image/webp")
            img_url_list = []
            video_name_list = []
            for p in img_list:
                kk = p.get('data-srcset')[2:].split('/')[3].split(
                    '?')[0].split('_')[0]
                if kk not in video_name_list:
                    video_name_list.append(kk)
                    img_url_list.append(
                        'http://gossv.cfp.cn/videos/mts_videos/medium/temp/{}.mp4'
                        .format(kk))

        for img_url in img_url_list:
            __vcg_img_download(waiting_process_queue, save_dir, img_url,
                               keyword, download_count)
            download_count += 1

        # step 3: process the remaining nav pages
        for page_index, page_url in enumerate(nav_page_list):
            logger.info("Analyze nav page(%d/%d) %s" %
                        (page_index + 2, len(nav_page_list) + 1, page_url))
            Result = A.get(page_url, timeout=7, allow_redirects=True)
            if Result.status_code != 200:
                logger.warn("Couldnt connect and analyze %s. (page %s)" %
                            ((Result.text, page_url)))
                Result = __vcg_try_get(A, page_url, 5)
                if Result is None:
                    logger.warn("Couldnt connect %s, return." % page_url)
                    return

            content = Result.text
            soup = BeautifulSoup(content, 'html.parser')

            img_url_list = []
            if download_data_type == 'image':
                # parse image resources
                img_list = soup.findAll("img", class_="lazyload_hk")
                img_url_list = [
                    "http://{}".format(p.get("data-src")[2:]) for p in img_list
                ]
            else:
                # parse video resources
                img_list = soup.findAll('source', type="image/webp")
                img_url_list = []
                video_name_list = []
                for p in img_list:
                    kk = p.get('data-srcset')[2:].split('/')[3].split(
                        '?')[0].split('_')[0]
                    if kk not in video_name_list:
                        video_name_list.append(kk)
                        img_url_list.append(
                            'http://gossv.cfp.cn/videos/mts_videos/medium/temp/{}.mp4'
                            .format(kk))

            logger.info("Finding %d data" % len(img_url_list))
            for img_url in img_url_list:
                __vcg_img_download(waiting_process_queue, save_dir, img_url,
                                   keyword, download_count)
                download_count += 1

        # put the end-of-stream marker
        if waiting_process_queue is not None:
            waiting_process_queue.put(None)

    # search and download
    if not os.path.exists(os.path.join(download_save_dir, 'test')):
        os.makedirs(os.path.join(download_save_dir, 'test'))

    if process_queue is not None:
        t = threading.Thread(target=__vcg_find_and_download,
                             args=(process_queue,
                                   os.path.join(download_save_dir, 'test')))
        t.start()
    else:
        __vcg_find_and_download(process_queue,
                                os.path.join(download_save_dir, 'test'))
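
A hedged usage sketch of the entry point; the keyword and directory are illustrative. With a queue supplied, the crawl runs on a background thread and the queue yields downloaded file paths, terminated by the None marker put at the end:

import queue

q = queue.Queue()
vcg_download('cat', {'download_data_type': 'image'}, './vcg_data', process_queue=q)

while True:
    file_path = q.get()
    if file_path is None:  # end-of-stream marker put by the downloader
        break
    print('downloaded', file_path)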
Example 8
    def get_new_suggestions(self, number=1, **kwargs):
        # 1.step get current population tag from study configuration
        study_configuration = json.loads(self.study.study_configuration)
        current_population_tag = int(
            study_configuration['searchSpace']['current_population_tag'])

        # 2.step fix failed trial (replace those failed trials)
        trials = Trial.filter(study_name=self.study.name,
                              tag=current_population_tag)
        for trial in trials:
            if trial.status == 'Failed':
                logger.warning('trial (id %s) failed and was rebuilt' %
                               trial.name)

                # generate new individual
                graph_encoder_str, graph_info = self.random()
                trial.structure = [graph_encoder_str, graph_info]
                trial.structure_encoder = None
                trial.name = '%s-%s' % (str(
                    uuid.uuid4()), datetime.fromtimestamp(
                        timestamp()).strftime('%Y%m%d-%H%M%S-%f'))

                temp_graph = Decoder().decode(graph_encoder_str)
                temp_graph.update_by(graph_info)
                trial.multi_objective_value = [1.0 / temp_graph.flops]
                trial.status = None

        # 3.step get candidate trials of study
        candidate_trials = Trial.filter(study_name=self.study.name,
                                        tag=current_population_tag,
                                        status=None)
        if len(candidate_trials) == 0:
            uncompleted_trials = Trial.filter(study_name=self.study.name,
                                              tag=current_population_tag,
                                              status="UnCompleted")
            if len(uncompleted_trials) > 0:
                # the study has not finished; free workers should wait
                return None

        while len(candidate_trials) == 0:
            # 3.1.step incubate next generation population
            elite_population = Population()
            completed_trials = Trial.filter(study_name=self.study.name,
                                            tag=current_population_tag,
                                            status="Completed")

            for t in completed_trials:
                me = self.evolution_control.problem.generateIndividual()
                me.id = t.name
                me.features = [
                    Decoder().decode(t.structure[0]), t.structure[1]
                ]
                me.type = 'parent'
                me.objectives[0] = t.objective_value
                me.objectives[1] = t.multi_objective_value[0]
                self.evolution_control.problem.calculate_objectives(me)
                elite_population.population.append(me)

            if current_population_tag >= 1:
                grandpa_population = Population()
                grandpa_completed_trials = Trial.filter(
                    study_name=self.study.name,
                    tag=current_population_tag - 1,
                    status="Completed")

                for t in grandpa_completed_trials:
                    me = self.evolution_control.problem.generateIndividual()
                    me.id = t.name
                    me.features = [
                        Decoder().decode(t.structure[0]), t.structure[1]
                    ]
                    me.type = 'parent'
                    me.objectives[0] = t.objective_value
                    me.objectives[1] = t.multi_objective_value[0]
                    self.evolution_control.problem.calculate_objectives(me)
                    grandpa_population.population.append(me)

                elite_population = self.evolution_control.evolve(
                    grandpa_population, elite_population)

            # incubate the next generation from the elite population
            offspring_population = self.evolution_control.create_children(
                elite_population)
            current_population_tag += 1

            # 3.2.step update trials
            study_current_population = []
            study_current_population_info = []
            for p in offspring_population.population:
                trial_name = '%s-%s' % (str(
                    uuid.uuid4()), datetime.fromtimestamp(
                        timestamp()).strftime('%Y%m%d-%H%M%S-%f'))
                trial = Trial.create(
                    Trial(self.study.name,
                          trial_name,
                          created_time=time.time(),
                          updated_time=time.time()))
                trial.structure = [
                    Encoder(skipkeys=True).encode(p.features[0]), p.features[1]
                ]
                trial.structure_encoder = None

                temp_graph = Decoder().decode(trial.structure[0])
                temp_graph.update_by(trial.structure[1])
                trial.objective_value = -1
                trial.multi_objective_value = [1.0 / temp_graph.flops]
                trial.tag = current_population_tag

                study_current_population.append(trial.structure[0])
                study_current_population_info.append(trial.structure[1])

            # update study configuration
            study_configuration['searchSpace'][
                'current_population'] = study_current_population
            study_configuration['searchSpace'][
                'current_population_info'] = study_current_population_info
            study_configuration['searchSpace'][
                'current_population_tag'] = current_population_tag

            # regenerate candidate trials
            candidate_trials = Trial.filter(study_name=self.study.name,
                                            tag=current_population_tag,
                                            status=None)

        self.study.study_configuration = json.dumps(study_configuration)
        trial_suggestion = random.choice(candidate_trials)
        trial_suggestion.status = 'UnCompleted'
        return [trial_suggestion]
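
The status values driving this loop form a small state machine; the summary below is reconstructed from the Trial.filter calls above, not from separate documentation:

TRIAL_LIFECYCLE = {
    None:          'freshly generated candidate, eligible for suggestion',
    'UnCompleted': 'suggested to a worker, result pending',
    'Completed':   'objectives recorded, joins the elite population',
    'Failed':      'rebuilt in step 2 with a fresh random structure',
}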
Example 9
    def random(self, count=1):
        try_count = 0
        proposed_search_space = []
        while True:
            if try_count > 50:
                logger.warning("couldn't find a valid graph structure for study %s" %
                               self.study.name)
                return [(None, None)]

            # 1.step make a structure suggestion
            clone_graph = copy.deepcopy(self.graph)
            outputs = clone_graph.get_input()
            output_node_id = -1
            decoder_output_last = []

            graph_encoder = []
            for output_index, output_id in enumerate(outputs):
                branch_offset = (1 + 8 * 8 + 4 * 4 + self.branches)
                graph_output_encoder = np.zeros(
                    (branch_offset * self.branches), dtype=np.float32)
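                # Layout of one branch's slice of the encoder (inferred from
                # the index arithmetic below; branch_offset = 1 + 8*8 + 4*4 + branches):
                #   [0 : branches]                        which members of X feed the branch
                #   [branches]                            1x1 convolution chosen
                #   [branches+1 : branches+1+8*8]         3x3 atrous conv (rate_h, rate_w) pair
                #   [branches+1+8*8 : branches+1+8*8+4*4] spatial pyramid pooling grid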

                if output_index > 0:
                    if random.random() > 0.5:
                        graph_encoder.extend(graph_output_encoder.tolist())
                        continue

                output = clone_graph.node_list[output_id]

                temp = [output_id]
                for node_id in decoder_output_last:
                    if clone_graph.node_list[node_id].shape[1] != output.shape[1] or \
                            clone_graph.node_list[node_id].shape[2] != output.shape[2]:
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.bilinear_resize(
                                height=output.shape[1], width=output.shape[2]),
                            node_id)
                        temp.append(output_node_id)
                    else:
                        # shapes already match; use the node directly
                        temp.append(node_id)

                if len(temp) > 1:
                    output_node_id = clone_graph.add_layer(
                        clone_graph.layer_factory.concat(), temp)
                    X = [output_node_id]
                else:
                    output_node_id = temp[0]
                    X = temp

                for branch_index in range(self.branches):
                    # random select branch input from X
                    X_index_list = list(range(len(X)))
                    X_select_index_list = sample(
                        X_index_list, random.randint(1, len(X_index_list)))
                    X_selected = [X[i] for i in X_select_index_list]

                    # concat all input
                    if len(X_selected) > 1:
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.concat(), X_selected)
                    else:
                        output_node_id = X_selected[0]

                    # encoder 1.step connect
                    graph_output_encoder[branch_index * branch_offset:(
                        branch_index +
                        1) * branch_offset][X_select_index_list] = 1.0

                    # operator space
                    r = random.randint(0, 2)
                    if r == 0:
                        # 1x1 convolution (conv + bn + relu)
                        shape = clone_graph.node_list[output_node_id].shape
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.conv2d(
                                input_channel=shape[3],
                                filters=self.channels,
                                kernel_size_h=1,
                                kernel_size_w=1), output_node_id)
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.bn2d(), output_node_id)
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.relu(), output_node_id)

                        # encoder 2.step 1x1 convolution
                        graph_output_encoder[branch_index *
                                             branch_offset:(branch_index + 1) *
                                             branch_offset][self.branches +
                                                            0] = 1.0
                    elif r == 1:
                        # 3x3 atrous separable convolution
                        shape = clone_graph.node_list[output_node_id].shape
                        # rate 1,3,6,9,12,15,18,21
                        min_hw = min(shape[1], shape[2])
                        rate_list = [1, 3, 6, 9, 12, 15, 18, 21]
                        rate_list = [
                            rate_list[i] for i in range(len(rate_list))
                            if rate_list[i] < min_hw
                        ]

                        rate_h_index = random.randint(0, len(rate_list) - 1)
                        rate_h = rate_list[rate_h_index]
                        rate_w_index = random.randint(0, len(rate_list) - 1)
                        rate_w = rate_list[rate_w_index]

                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.separable_conv2d(
                                input_channel=shape[3],
                                filters=self.channels,
                                kernel_size_h=3,
                                kernel_size_w=3,
                                rate_h=rate_h,
                                rate_w=rate_w), output_node_id)
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.bn2d(), output_node_id)
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.relu(), output_node_id)

                        # encoder 3.step 3x3 atrous separable convolution
                        graph_output_encoder[branch_index *
                                             branch_offset:(branch_index + 1) *
                                             branch_offset][self.branches + 1 +
                                                            rate_h_index *
                                                            len(rate_list) +
                                                            rate_w_index] = 1.0
                    else:
                        # spatial pyramid pooling
                        shape = clone_graph.node_list[output_node_id].shape
                        min_hw = min(shape[1], shape[2])

                        gh = [1, 2, 4, 8]
                        gh = [n for n in gh if n < min_hw]
                        grid_h_index = random.randint(0, len(gh) - 1)
                        grid_h = gh[grid_h_index]

                        gw = [1, 2, 4, 8]
                        gw = [n for n in gw if n < min_hw]
                        grid_w_index = random.randint(0, len(gw) - 1)
                        grid_w = gw[grid_w_index]
                        output_node_id = clone_graph.add_layer(
                            clone_graph.layer_factory.spp(grid_h=grid_h,
                                                          grid_w=grid_w),
                            output_node_id)

                        # encoder 4.step spp
                        graph_output_encoder[branch_index *
                                             branch_offset:(branch_index + 1) *
                                             branch_offset][self.branches + 1 +
                                                            8 * 8 +
                                                            grid_h_index * 4 +
                                                            grid_w_index] = 1.0

                    X.append(output_node_id)

                output_node_id = clone_graph.add_layer(
                    clone_graph.layer_factory.concat(), X[1:])
                decoder_output_last.append(output_node_id)

                graph_encoder.extend(graph_output_encoder.tolist())

            # check flops
            if clone_graph.flops > self.flops:
                try_count += 1
                continue

            # check structure is not been checked
            trials = Trial.filter(study_name=self.study.name)
            is_not_valid = False
            for t in trials:
                if str(t.structure_encoder) == str(graph_encoder):
                    is_not_valid = True
                    break

            if is_not_valid:
                try_count += 1
                continue

            proposed_search_space.append(
                (np.array(graph_encoder),
                 Encoder(skipkeys=True).encode(clone_graph)))
            if len(proposed_search_space) == count:
                break

        return proposed_search_space
Example 10
    def asynProcess(self, preprocess_type, data):
        if preprocess_type == 'DOWNLOAD':
            try:
                # step 1: download (if necessary)
                pic = requests.get(data['url'], timeout=7)
                download_path = os.path.join(self.demo_dump, 'static', 'input',
                                             data['file_name'])
                with open(download_path, 'wb') as fp:
                    fp.write(pic.content)

                # step 2: validate
                # step 2.1: check the file size
                if 'file_size' in self.demo_constraint:
                    max_file_size = self.demo_constraint['file_size']
                    fsize = os.path.getsize(download_path) / float(1024 * 1024)
                    if round(fsize, 2) > max_file_size:
                        return {
                            'status': 400,
                            'code': 'InvalidImageSize',
                            'message': 'The input file size is too large (>%f MB)' %
                                       float(max_file_size)
                        }

                # step 2.2: check the file format
                # detect the real image format (the file extension may be wrong)
                download_path = os.path.normpath(download_path)
                file_type = imghdr.what(download_path)
                if file_type in ['jpeg', 'png', 'gif', 'bmp']:
                    # image file
                    file_name = '%s.%s' % (data['file_name'], file_type)
                    os.rename(
                        download_path,
                        os.path.join('/'.join(download_path.split('/')[0:-1]),
                                     file_name))
                    download_path = os.path.join(
                        '/'.join(download_path.split('/')[0:-1]), file_name)
                else:
                    # not an image file; fall back to the extension
                    file_type = download_path.split('.')[-1]

                if 'file_type' in self.demo_constraint:
                    if file_type not in self.demo_constraint['file_type']:
                        return {
                            'status': 400,
                            'code': 'InvalidImageFormat',
                            'message': 'The input file is not in a valid image format that the service can support'
                        }

                return {'status': 200, 'path': download_path}
            except Exception:
                logger.error('failed to download %s' % data['url'])
                return {'status': 500, 'code': 'UnknownError'}
        elif preprocess_type == 'RECEIVE':
            try:
                # step 1: save the file
                file_path = data['file_path']
                file_data = data['file_data']
                with open(file_path, 'wb') as fp:
                    fp.write(file_data)

                # step 2: validate
                # step 2.1: check the file size
                if 'file_size' in self.demo_constraint:
                    max_file_size = self.demo_constraint['file_size']
                    fsize = os.path.getsize(file_path) / float(1024 * 1024)
                    if round(fsize, 2) > max_file_size:
                        return {
                            'status': 400,
                            'code': 'InvalidImageSize',
                            'message': 'The input file size is too large (>%f MB)' %
                                       float(max_file_size)
                        }

                # step 2.2: check the file format
                # detect the real image format (the file extension may be wrong)
                file_path = os.path.normpath(file_path)
                file_type = imghdr.what(file_path)
                if file_type in ['jpeg', 'png', 'gif', 'bmp']:
                    # image file
                    file_name = '%s.%s' % (
                        file_path.split('/')[-1].split('.')[0], file_type)
                    os.rename(
                        file_path,
                        os.path.join('/'.join(file_path.split('/')[0:-1]),
                                     file_name))
                    file_path = os.path.join(
                        '/'.join(file_path.split('/')[0:-1]), file_name)
                else:
                    # not an image file; use the extension as the file type
                    file_type = file_path.split('.')[-1]

                if 'file_type' in self.demo_constraint:
                    if file_type not in self.demo_constraint['file_type']:
                        return {
                            'status': 400,
                            'code': 'InvalidImageFormat',
                            'message': 'The input file is not in a valid image format that the service can support'
                        }
                return {'status': 200, 'path': file_path}
            except Exception:
                return {'status': 500, 'code': 'UnknownError'}
        elif preprocess_type == 'PACKAGE':
            data_path = os.path.normpath(data['path'])
            data_name = data_path.split('/')[-1]
            ext_name = data_name.split('.')[-1].lower()

            # infer the uploaded data type from the file extension
            if ext_name in ['jpg', 'jpeg', 'png', 'bmp', 'gif']:
                return {'data': (data_path, data_name, 'IMAGE'), 'status': 200}
            elif ext_name in ['mp4', 'avi', 'mov']:
                return {'data': (data_path, data_name, 'VIDEO'), 'status': 200}
            elif ext_name in ['txt']:
                return {'data': (data_path, data_name, 'FILE'), 'status': 200}
            else:
                # TODO: support audio and other formats
                logger.warning('unsupported file type %s' % ext_name)
                return {
                    'status': 400,
                    'code': 'InvalidPackage',
                    'message': 'Fail package'
                }
        elif preprocess_type == 'API_QUERY':
            # step 1: base64-decode the payload
            # format: {'image': ..., 'video': None, 'params': [{'data': ..., 'type': ..., 'name': ...}, ...]}
            image_str = None
            if 'image' in data:
                image_str = data['image']

            if image_str is None:
                return {
                    'status': 400,
                    'code': 'InvalidData',
                    'message': 'Missing query data'
                }

            image_b = base64.b64decode(image_str)
            return {
                'status': 200,
                'data': {
                    'image': image_b,
                    'params': data['params']
                }
            }
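
imghdr, used above for format sniffing, was deprecated in Python 3.11 and removed in 3.13. A drop-in sketch using Pillow instead (an alternative, not what this code ships with):

from PIL import Image, UnidentifiedImageError

def detect_image_type(path):
    # returns a lowercase format name ('jpeg', 'png', 'gif', 'bmp', ...)
    # or None, mirroring imghdr.what for the formats checked above
    try:
        with Image.open(path) as im:
            return im.format.lower()
    except UnidentifiedImageError:
        return None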
Example 11
File: cmd.py Project: zhcm/antgo
  def process_del_command(self):
    dataset_name = FLAGS.dataset_name()
    task_name = FLAGS.task_name()
    experiment_name = FLAGS.experiment_name()
    task_type = FLAGS.task_type()
    task_measure = FLAGS.task_measure()
    group_name = FLAGS.group()
    user_name = FLAGS.user()

    if group_name is not None and user_name is not None:
      # delete user from group
      users_info = {'users': [user_name]}
      res = self.remote_api_request('hub/api/terminal/groups/%s'%str(group_name),
                                   json.dumps(users_info),
                                   'delete')
      if res is not None:
        logger.info('successfully deleted user %s from group %s' % (str(user_name), str(group_name)))
      else:
        logger.error('failed to delete user %s from group %s' % (str(user_name), str(group_name)))
      return

    if group_name is not None and user_name is None:
      # delete group
      logger.error('deleting a group is not supported')
      return

    if user_name is not None:
      logger.warning('ignoring user_name setting')

    if dataset_name is None and \
            task_name is None and \
            experiment_name is None and \
            task_type is None:
      logger.error('must specify an object to delete [%s]' %
                   ','.join(['dataset', 'task', 'experiment', 'task-type']))
      return

    if dataset_name is not None:
      delete_remote_api = 'hub/api/terminal/delete/dataset'
      response = self.remote_api_request(delete_remote_api,
                                         action='delete',
                                         data={'dataset-name': dataset_name})
      if response['status'] != 'OK':
        logger.error('delete error')
        return
    elif task_name is not None:
      delete_remote_api = 'hub/api/terminal/delete/task'
      response = self.remote_api_request(delete_remote_api,
                                         action='delete',
                                         data={'task-name': task_name})

      if response['status'] != 'OK':
        logger.error('delete error')
        return
    elif experiment_name is not None:
      delete_remote_api = 'hub/api/terminal/delete/experiment'
      response = self.remote_api_request(delete_remote_api,
                                         action='delete',
                                         data={'experiment-name': experiment_name})

      if response['status'] != 'OK':
        logger.error('delete error, maybe experiment name not unique')
        return
    elif task_type is not None:
      data = {}
      if task_measure is not None:
        task_measures = task_measure.split(',')
        task_measures = json.dumps(task_measures)
        data['task-measures'] = task_measures

      remote_api = 'hub/api/terminal/task/type/%s'%task_type
      response = self.remote_api_request(remote_api,
                                         action='delete',
                                         data=data)

      if response is None:
        logger.error('failed to delete task type')
        return

      logger.info('successfully deleted task type')
      print(response)