Example #1
import logging

import numpy as np
from scipy import linalg

logger = logging.getLogger(__name__)  # stand-in for the module's logger


def calculate_fid(mu1, mu2, sigma1, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.
    code from https://github.com/bioinf-jku/TTUR.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : The sample mean over activations of the pool_3 layer of the
               Inception net (as returned by the function 'get_predictions')
               for generated samples.
    -- mu2   : The sample mean over activations of the pool_3 layer, precalculated
               on a representative data set.
    -- sigma1: The covariance matrix over activations of the pool_3 layer for
               generated samples.
    -- sigma2: The covariance matrix over activations of the pool_3 layer,
               precalculated on a representative data set.
    Returns:
    --   : The Frechet Distance.
    """

    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)

    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
    assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"

    diff = mu1 - mu2

    # product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
        logger.warning(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError("Imaginary component {}".format(m))
        covmean = covmean.real

    tr_covmean = np.trace(covmean)

    return diff.dot(diff) + np.trace(sigma1) + np.trace(
        sigma2) - 2 * tr_covmean
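A quick sanity check helps when wiring this in: the FID of a distribution against itself should be ~0, and it grows with the squared distance between the means. A minimal sketch (the 64-dimensional features are illustrative; real pool_3 features are 2048-dimensional):

rng = np.random.default_rng(0)
feat = rng.normal(size=(1000, 64))
mu, sigma = feat.mean(axis=0), np.cov(feat, rowvar=False)

print(calculate_fid(mu, mu, sigma, sigma))        # ~0 up to numerical noise
print(calculate_fid(mu, mu + 0.5, sigma, sigma))  # ~64 * 0.25 = 16 from the mean shift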
Example #2
def load_label(file_name):
    labels = []
    if os.path.exists(file_name):
        with open(file_name, "rt") as f:
            lines = f.readlines()
        for line in lines:
            # split on any whitespace so blank lines and trailing newlines
            # do not break float()
            values = [float(s) for s in line.split()]
            if len(values) == 5:
                labels.append(values)
    else:
        logger.warning("Label txt file is not found %s." % (file_name))
    return labels
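Judging from how convert_image (Example #7 below) consumes these labels, each line holds five space-separated floats: a class id followed by a normalized XYWH box. A minimal sketch with an illustrative file name:

with open('sample_label.txt', 'w') as f:
    f.write('0 0.50 0.40 0.20 0.30\n')
    f.write('2 0.25 0.75 0.10 0.10\n')

print(load_label('sample_label.txt'))
# [[0.0, 0.5, 0.4, 0.2, 0.3], [2.0, 0.25, 0.75, 0.1, 0.1]]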
Example #3
def resolve_file_format(args, import_files, export_file=None):
    if len(import_files) == 1:
        input_ext = os.path.splitext(import_files[0])[1]
        if input_ext == '.nnp' or input_ext == '.nntxt':
            args.import_format = 'NNP'
        elif input_ext == '.onnx':
            args.import_format = 'ONNX'
        elif input_ext == '.pb':
            args.import_format = "TF_PB"
        elif input_ext == '.ckpt':
            args.import_format = "TF_CKPT_V1"
        elif input_ext == '.meta':
            args.import_format = "TF_CKPT_V2"
        elif input_ext == '.tflite':
            args.import_format = "TFLITE"
        elif input_ext == '' and os.path.isdir(import_files[0]):
            args.import_format = "SAVED_MODEL"

    if export_file:
        output_ext = os.path.splitext(export_file)[1]
        if output_ext == '.nnp':
            args.export_format = 'NNP'
        elif output_ext == '.nnb':
            args.export_format = 'NNB'
        elif output_ext == '.onnx':
            args.export_format = 'ONNX'
        elif output_ext == '.pb':
            args.export_format = 'TF_PB'
        elif output_ext == '.tflite':
            args.export_format = 'TFLITE'
        elif output_ext == '':
            logger.warning(
                "The export file format is 'CSRC' or 'SAVED_MODEL'; the "
                "'--export-format' argument must be set explicitly."
            )
            assert (args.export_format == 'CSRC'
                    or args.export_format == 'SAVED_MODEL')
    else:
        args.export_format = ''

    if args.import_format in ['ONNX', 'TF_CKPT_V1', 'TF_CKPT_V2', 'TF_PB', 'SAVED_MODEL', 'TFLITE'] or \
            args.export_format in ['ONNX', 'TFLITE', 'SAVED_MODEL', 'TF_PB']:
        try:
            import nnabla.utils.converter.onnx
            import nnabla.utils.converter.tensorflow
        except ImportError:
            raise ImportError(
                'The nnabla_converter package was not found; install it with "pip install nnabla_converter".'
            )
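A sketch of driving the resolver, with argparse.Namespace standing in for the real CLI args object and illustrative file names; note the final check raises ImportError unless nnabla_converter is installed when ONNX/TF formats are involved:

from argparse import Namespace

args = Namespace(import_format=None, export_format=None)
resolve_file_format(args, ['model.onnx'], 'model.tflite')
print(args.import_format, args.export_format)  # ONNX TFLITE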
Example #4
    def execute(self, output):
        onnx_model = OnnxExporter(self._nnp, self._batch_size,
                                  opset="9").export_model_proto()
        tf_rep = prepare(onnx_model)
        self.check_tf_graph(tf_rep.graph)
        graph_def = tf_rep.graph.as_graph_def()
        if self._enable_optimize:
            optimizable_state = check_optimization_criteria(
                self._nnp, self._batch_size)
            if optimizable_state['NCHW_TO_NHWC']['status']:
                from .common import OptimizePb
                optimize = OptimizePb(graph_def).execute()
                graph_def = optimize.export_graph_def()
                import json
                doc_file = output.replace('.', '_') + '.json'
                with open(doc_file, 'w') as f:
                    json.dump(optimize.get_optimization_rate(), f)
            else:
                logger.warning(
                    "Currently this model does not support optimization")
        # reset_default_graph and from_session live under tf.compat.v1 in TF2,
        # so use the compat API consistently with the Session below.
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as session:
            _ = tf.import_graph_def(graph_def, name='')
            inputs, outputs = find_out_terminal_node(session.graph_def,
                                                     postfix=True)

            inputs_tensor = [
                session.graph.get_tensor_by_name(inp) for inp in inputs
            ]
            outputs_tensor = [
                session.graph.get_tensor_by_name(oup) for oup in outputs
            ]

            converter = tf.compat.v1.lite.TFLiteConverter.from_session(
                session, inputs_tensor, outputs_tensor)
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
            ]
            tflite_model = converter.convert()
            with open(output, 'wb') as f:
                f.write(tflite_model)
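To confirm the written file is a valid flatbuffer, the standard TFLite interpreter can load it back; a minimal sketch assuming the output path was 'model.tflite':

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['shape'])
print(interpreter.get_output_details()[0]['name'])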
Example #5
    def execute(self, output):
        onnx_model = OnnxExporter(self._nnp, self._batch_size,
                                  opset="9").export_model_proto()
        tf_rep = prepare(onnx_model)
        if self._enable_optimize:
            optimizable_state = check_optimization_criteria(
                self._nnp, self._batch_size)
            if optimizable_state['NCHW_TO_NHWC']['status']:
                from .common import OptimizePb
                optimize = OptimizePb(tf_rep.graph.as_graph_def()).execute()
                optimize.export_to_file(output)
                import json
                doc_file = output.replace('.', '_') + '.json'
                with open(doc_file, 'w') as f:
                    json.dump(optimize.get_optimization_rate(), f)
            else:
                logger.warning(
                    "Currently this model does not support optimization")
        else:
            tf_rep.export_graph(output)
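With the older onnx-tf releases this snippet targets, tf_rep.export_graph writes a frozen GraphDef (.pb); newer releases write a SavedModel instead. A sketch of reading a frozen .pb back, assuming an illustrative 'model.pb' path:

import tensorflow as tf

graph_def = tf.compat.v1.GraphDef()
with open('model.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')
print([op.name for op in graph.get_operations()][:5])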
Example #6
def get_all_features_on_imagepaths(image_paths: list, batch_size: int):
    """Extract all the given images' feature.
        Args:
            image_paths (list): list of image file's paths.
            batch_size (int): batch size.

        Returns:
            all_feat (np.ndarray): extracted (N) images' feature. shape: (N, 2048)
    """
    print("loading images...")
    num_images = len(image_paths)
    if num_images < 9999:
        logger.warning(
            f"only {num_images} images found; the FID score may be inaccurate."
        )
    num_loop, num_remainder = divmod(num_images, batch_size)
    # When num_remainder == 0, image_paths[:-0] is empty and image_paths[-0:]
    # is the whole list; the else branch below relies on this.
    batched_images = image_paths[:-num_remainder]
    rest_image_paths = image_paths[-num_remainder:]

    pbar = tqdm(total=num_images)
    if batch_size > 1 and num_remainder != 0:
        images = im2ndarray(rest_image_paths, imsize=(299, 299))
        feature = get_features(images)
        all_feat = feature.data
        pbar.update(num_remainder)
    else:
        # batch_size == 1, or batch_size divides num_images evenly: there is
        # no remainder batch, so start from empty features and run every
        # image through the batched loop below.
        all_feat = np.zeros((0, 2048))
        batched_images = rest_image_paths

    for i in range(num_loop):
        # use a local name to avoid shadowing the image_paths argument
        batch_paths = batched_images[i * batch_size:(i + 1) * batch_size]
        images = im2ndarray(batch_paths, imsize=(299, 299))
        feature = get_features(images)
        all_feat = np.concatenate([all_feat, feature.data], axis=0)
        pbar.update(batch_size)

    return all_feat
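The returned matrix feeds directly into the FID statistics: its per-column mean and covariance are the (mu, sigma) pairs that calculate_fid (Example #1) expects. A sketch, with fake_paths and real_paths as placeholder path lists:

fake_feat = get_all_features_on_imagepaths(fake_paths, batch_size=16)
real_feat = get_all_features_on_imagepaths(real_paths, batch_size=16)

mu1, sigma1 = np.mean(fake_feat, axis=0), np.cov(fake_feat, rowvar=False)
mu2, sigma2 = np.mean(real_feat, axis=0), np.cov(real_feat, rowvar=False)
print(calculate_fid(mu1, mu2, sigma1, sigma2))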
Example #7
def convert_image(args):
    file_name = args[0]
    source_dir = args[1]
    dest_dir = args[2]
    width = args[3]
    height = args[4]
    mode = args[5]
    ch = args[6]
    num_class = args[7]
    grid_size = args[8]
    anchors = args[9]

    src_file_name = os.path.join(source_dir, file_name)
    src_label_file_name = os.path.join(
        source_dir, os.path.splitext(file_name)[0] + ".txt")
    image_file_name = os.path.join(
        dest_dir, 'data', os.path.splitext(file_name)[0] + ".png")
    label_file_name = os.path.join(
        dest_dir, 'data', os.path.splitext(file_name)[0] + "_label.csv")
    region_file_name = os.path.join(
        dest_dir, 'data', os.path.splitext(file_name)[0] + "_region.csv")
    try:
        os.makedirs(os.path.dirname(image_file_name))
    except OSError:
        pass  # python2's os.makedirs has no exist_ok arg
    # print(src_file_name, dest_file_name)

    # open source image
    labels = load_label(src_label_file_name)

    warp_func = None
    try:
        im = imread(src_file_name)
        if len(im.shape) < 2 or len(im.shape) > 3:
            logger.warning(
                "Illegal image file format %s." % src_file_name)
            raise ValueError("unsupported image shape")
        elif len(im.shape) == 3:
            # RGB image
            if im.shape[2] != 3:
                logger.warning(
                    "The image must be RGB or monochrome.")
                raise ValueError("unsupported number of channels")

        # resize
        h = im.shape[0]
        w = im.shape[1]
        input_size = (w, h)
        # print(h, w)
        if w != width or h != height:
            # resize image
            if mode == 'trimming':
                # trimming mode
                if float(h) / w > float(height) / width:
                    target_h = int(float(w) / width * height)
                    # print('crop_target_h', target_h)
                    im = im[(h - target_h) // 2:h - (h - target_h) // 2, ::]
                else:
                    target_w = int(float(h) / height * width)
                    # print('crop_target_w', target_w)
                    im = im[::, (w - target_w) // 2:w - (w - target_w) // 2]
                # print('before', im.shape)

                def trim_warp(label, input_size, output_size):
                    w_scale = input_size[0] * 1.0 / output_size[0]
                    h_scale = input_size[1] * 1.0 / output_size[1]
                    # label layout is [class, x, y, w, h]; warp x and y
                    label[1] = (label[1] - (1.0 - 1.0 / w_scale)
                                * 0.5) * w_scale
                    label[2] = (label[2] - (1.0 - 1.0 / h_scale)
                                * 0.5) * h_scale
                    label[3] *= w_scale
                    label[4] *= h_scale
                    return label
                warp_func = trim_warp
            elif mode == 'padding':
                # padding mode
                if float(h) / w < float(height) / width:
                    target_h = int(float(height) / width * w)
                    # print('padding_target_h', target_h)
                    pad = (((target_h - h) // 2, target_h -
                            (target_h - h) // 2 - h), (0, 0))
                else:
                    target_w = int(float(width) / height * h)
                    # print('padding_target_w', target_w)
                    pad = ((0, 0), ((target_w - w) // 2,
                                    target_w - (target_w - w) // 2 - w))
                if len(im.shape) == 3:
                    pad = pad + ((0, 0),)
                im = np.pad(im, pad, 'constant')
                # print('before', im.shape)

                def pad_warp(label, input_size, output_size):
                    w_scale = input_size[0] * 1.0 / output_size[0]
                    h_scale = input_size[1] * 1.0 / output_size[1]
                    # label layout is [class, x, y, w, h]; warp x and y
                    label[1] = (label[1] * w_scale + (1.0 - w_scale) * 0.5)
                    label[2] = (label[2] * h_scale + (1.0 - h_scale) * 0.5)
                    label[3] *= w_scale
                    label[4] *= h_scale
                    return label
                warp_func = pad_warp
            im = imresize(im, size=(width, height))
            output_size = (width, height)
            # print('after', im.shape)

        # change color ch
        if len(im.shape) == 2 and ch == 3:
            # Monochrome to RGB
            im = np.array([im, im, im]).transpose((1, 2, 0))
        elif len(im.shape) == 3 and ch == 1:
            # RGB to monochrome
            im = np.dot(im[..., :3], [0.299, 0.587, 0.114]).astype(np.uint8)

        # output image
        imsave(image_file_name, im)

    except:
        logger.warning(
            "Failed to convert %s." % (src_file_name))
        raise

    # create label and region file
    if warp_func is not None:
        labels = [warp_func(label, input_size, output_size)
                  for label in labels]
    grid_w = width // grid_size
    grid_h = height // grid_size
    label_array = np.full((len(anchors), grid_h, grid_w), -1, dtype=int)
    region_array = np.full(
        (len(anchors), grid_h, grid_w, 4), 0.0, dtype=float)

    for label in labels:
        label_rect = ObjectRect(XYWH=label[1:]).clip()

        if label_rect.width() > 0.0 and label_rect.height() > 0.0:
            gx, gy = int(label_rect.centerx() *
                         grid_w), int(label_rect.centery() * grid_h)
            max_iou = 0
            anchor_index = 0
            for i, anchor in enumerate(anchors):
                anchor_rect = ObjectRect(
                    XYWH=[(gx + 0.5) / grid_w, (gy + 0.5) / grid_h, anchor[0], anchor[1]])
                iou = label_rect.iou(anchor_rect)
                if iou > max_iou:
                    anchor_index = i
                    max_iou = iou
            label_array[anchor_index][gy][gx] = int(label[0])
            # All candidate anchor rects share the same center (the grid cell
            # center), so using the loop's last anchor_rect for the center
            # offsets below is safe even though it is not the best anchor.
            region_array[anchor_index][gy][gx] = [
                (label_rect.centerx() - anchor_rect.centerx()) * grid_w + 0.5,
                (label_rect.centery() - anchor_rect.centery()) * grid_h + 0.5,
                np.log(label_rect.width() * grid_w),
                np.log(label_rect.height() * grid_h)]
    np.savetxt(label_file_name, label_array.reshape(
        (label_array.shape[0] * label_array.shape[1], -1)), fmt='%i', delimiter=',')
    np.savetxt(region_file_name, region_array.reshape(
        (region_array.shape[0] * region_array.shape[1], -1)), fmt='%f', delimiter=',')
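The encoding above is YOLO-style: the object's normalized center selects a grid cell, the best-IoU anchor index selects the channel, and the offsets are measured from the cell center. A quick arithmetic sketch of the cell assignment (all numbers illustrative):

grid_w = grid_h = 112 // 16    # a 112x112 image with grid_size=16 gives a 7x7 grid
centerx, centery = 0.62, 0.40  # normalized box center
gx, gy = int(centerx * grid_w), int(centery * grid_h)
print(gx, gy)                  # 4 2
print((gx + 0.5) / grid_w, (gy + 0.5) / grid_h)  # cell center used for the offsets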
Example #8
def convert_image(args):
    file_name = args[0]
    source_dir = args[1]
    dest_dir = args[2]
    width = args[3]
    height = args[4]
    mode = args[5]
    ch = args[6]

    src_file_name = os.path.join(source_dir, file_name)
    file_name = os.path.splitext(file_name)[0] + ".png"
    dest_file_name = os.path.join(dest_dir, file_name)
    dest_path = os.path.dirname(dest_file_name)
    # print(src_file_name, dest_file_name)

    # open source image
    try:
        im = imread(src_file_name)
        if len(im.shape) < 2 or len(im.shape) > 3:
            logger.warning(
                "Illegal image file format %s." % src_file_name)
            raise ValueError("unsupported image shape")
        elif len(im.shape) == 3:
            # RGB image
            if im.shape[2] != 3:
                logger.warning("The image must be RGB or monochrome.")
                raise ValueError("unsupported number of channels")

        # resize
        h = im.shape[0]
        w = im.shape[1]
        # print(h, w)
        if w != width or h != height:
            # resize image
            if mode == 'trimming':
                # trimming mode
                if float(h) / w > float(height) / width:
                    target_h = int(float(w) / width * height)
                    # print('crop_target_h', target_h)
                    im = im[(h - target_h) // 2:h - (h - target_h) // 2, ::]
                else:
                    target_w = int(float(h) / height * width)
                    # print('crop_target_w', target_w)
                    im = im[::, (w - target_w) // 2:w - (w - target_w) // 2]
                # print('before', im.shape)
            elif mode == 'padding':
                # padding mode
                if float(h) / w < float(height) / width:
                    target_h = int(float(height) / width * w)
                    # print('padding_target_h', target_h)
                    pad = (((target_h - h) // 2,
                            target_h - (target_h - h) // 2 - h), (0, 0))
                else:
                    target_w = int(float(width) / height * h)
                    # print('padding_target_w', target_w)
                    pad = ((0, 0), ((target_w - w) // 2,
                                    target_w - (target_w - w) // 2 - w))
                if len(im.shape) == 3:
                    pad = pad + ((0, 0), )
                im = np.pad(im, pad, 'constant')
                # print('before', im.shape)
            im = imresize(im, size=(height, width))
            # print('after', im.shape)

        # change color ch
        if len(im.shape) == 2 and ch == 3:
            # Monochrome to RGB
            im = np.array([im, im, im]).transpose((1, 2, 0))
        elif len(im.shape) == 3 and ch == 1:
            # RGB to monochrome
            im = np.dot(im[..., :3], [0.299, 0.587, 0.114])

        # output
        try:
            os.makedirs(dest_path)
        except OSError:
            pass  # python2's os.makedirs has no exist_ok arg

        imsave(dest_file_name, im)
    except:
        logger.warning("Failed to convert %s." % (src_file_name))
Example #9
def create_image_classification_dataset_command(args):
    # settings
    source_dir = args.sourcedir
    dest_csv_file_name = [os.path.join(args.outdir, args.file1)]
    if args.file2:
        dest_csv_file_name.append(os.path.join(args.outdir, args.file2))
    dest_dir = args.outdir
    width = int(args.width)
    height = int(args.height)
    padding = args.mode == 'padding'
    ch = int(args.channel)
    shuffle = args.shuffle == 'true'
    test_data_ratio = int(args.ratio2) if args.ratio2 else 0

    if source_dir == dest_dir:
        logger.critical("Input directory and output directory are the same.")
        return

    # create file list
    logger.log(99, "Creating file list...")
    dirs = os.listdir(source_dir)
    dirs = [d for d in dirs if os.path.isdir(os.path.join(source_dir, d))]
    dirs.sort()
    # print(dirs)

    labels = []
    label_index = -1
    csv_data = []
    pbar = tqdm.tqdm(total=100, unit='%')
    last = 0
    for i, dir in enumerate(dirs):
        # print(dir)
        full_path = os.path.join(source_dir, dir)
        files = os.listdir(full_path)
        files = [
            f for f in files if os.path.isfile(os.path.join(full_path, f))
        ]
        files.sort()
        found = False
        for i2, file in enumerate(files):
            file_name = os.path.join(full_path, file)
            if imghdr.what(file_name) is not None:
                if not found:
                    labels.append(dir)
                    label_index += 1
                    found = True
                csv_data.append([os.path.join('.', dir, file), label_index])
            current = round(100 * (float(i) / len(dirs) + float(i2) /
                                   (len(dirs) * len(files))))
            if last < current:
                pbar.update(current - last)
                last = current
    pbar.close()

    # create output data
    logger.log(99, "Creating output images...")
    # Iterate over a copy: invalid entries are removed from csv_data below,
    # and removing from a list while iterating it would skip elements.
    for data in tqdm.tqdm(list(csv_data), unit='images'):
        src_file_name = os.path.join(source_dir, data[0])
        data[0] = os.path.splitext(data[0])[0] + ".png"
        dest_file_name = os.path.join(dest_dir, data[0])
        dest_path = os.path.dirname(dest_file_name)
        # print(src_file_name, dest_file_name)

        # open source image
        im = scipy.misc.imread(src_file_name)
        if len(im.shape) < 2 or len(im.shape) > 3:
            logger.warning(
                "Illegal image file format %s." % src_file_name)
            csv_data.remove(data)
            continue
        elif len(im.shape) == 3:
            # RGB image
            if im.shape[2] != 3:
                logger.warning(
                    "The image must be RGB or monochrome %s." % src_file_name)
                csv_data.remove(data)
                continue

        # resize
        h = im.shape[0]
        w = im.shape[1]
        # print(h, w)
        if w != width or h != height:
            # resize image
            if not padding:
                # trimming mode
                if float(h) / w > float(height) / width:
                    target_h = int(float(w) / width * height)
                    # print('crop_target_h', target_h)
                    im = im[(h - target_h) // 2:h - (h - target_h) // 2, ::]
                else:
                    target_w = int(float(h) / height * width)
                    # print('crop_target_w', target_w)
                    im = im[::, (w - target_w) // 2:w - (w - target_w) // 2]
                # print('before', im.shape)
                im = scipy.misc.imresize(arr=im,
                                         size=(height, width),
                                         interp='lanczos')
                # print('after', im.shape)
            else:
                # padding mode
                if float(h) / w < float(height) / width:
                    target_h = int(float(height) / width * w)
                    # print('padding_target_h', target_h)
                    pad = (((target_h - h) // 2,
                            target_h - (target_h - h) // 2 - h), (0, 0))
                else:
                    target_w = int(float(width) / height * h)
                    # print('padding_target_w', target_w)
                    pad = ((0, 0), ((target_w - w) // 2,
                                    target_w - (target_w - w) // 2 - w))
                if len(im.shape) == 3:
                    pad = pad + ((0, 0), )
                im = np.pad(im, pad, 'constant')
                # print('before', im.shape)
                im = scipy.misc.imresize(arr=im,
                                         size=(height, width),
                                         interp='lanczos')
                # print('after', im.shape)

        # change color ch
        if len(im.shape) == 2 and ch == 3:
            # Monochrome to RGB
            im = np.array([im, im, im]).transpose((1, 2, 0))
        elif len(im.shape) == 3 and ch == 1:
            # RGB to monochrome
            im = np.dot(im[..., :3], [0.299, 0.587, 0.114])

        # output
        if not os.path.exists(dest_path):
            os.makedirs(dest_path)
        scipy.misc.imsave(dest_file_name, im)

    logger.log(99, "Creating CSV files...")
    if shuffle:
        import random
        random.shuffle(csv_data)

    csv_data_num = [(len(csv_data) * (100 - test_data_ratio)) // 100]
    csv_data_num.append(len(csv_data) - csv_data_num[0])
    data_head = 0
    for csv_file_name, data_num in zip(dest_csv_file_name, csv_data_num):
        if data_num:
            csv_data_2 = csv_data[data_head:data_head + data_num]
            data_head += data_num

            csv_data_2.insert(0, ['x:image', 'y:label'])
            with open(csv_file_name, 'w') as f:
                writer = csv.writer(f, lineterminator='\n')
                writer.writerows(csv_data_2)
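The resulting CSVs pair a relative image path with a label index under an 'x:image','y:label' header row; a minimal readback sketch (the 'train.csv' name stands in for args.file1):

import csv

with open('train.csv') as f:
    rows = list(csv.reader(f))
print(rows[0])  # ['x:image', 'y:label']
print(rows[1])  # e.g. ['./cat/img001.png', '0']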
Example #10
def convert_to_nodes(func, variables, input_types, output_types,
                     broadcast_target):
    """Convert a function to a node or a group of nodes"""
    op_type = nnabla_function_type_to_onnx_optype.get(func.type)
    if op_type is None:
        raise ValueError(
            "function {} is currently not supported for ONNX conversion".
            format(func.type))
    n = onnx.helper.make_node(op_type, func.input, func.output, name=func.name)
    nl = []
    if func.type == "Concatenate":
        # ONNX requires axis setting as a parameter
        # for the concat op_type.
        # If no value is set for axis,
        # the default value 0 will be set
        attr = onnx.helper.make_attribute("axis", func.concatenate_param.axis)
        n.attribute.extend([attr])
        nl.append(n)
    elif func.type == "Dropout":
        # NNP Dropout is always is_test=false
        # since we always apply dropout when it is
        # included in a network.
        attr = onnx.helper.make_attribute("is_test", 0)
        n.attribute.extend([attr])
        nl.append(n)
    elif func.type == "MaxPooling":
        mpp = func.max_pooling_param
        if not mpp.ignore_border:
            raise ValueError(
                "MaxPooling with ignore_border=False is not supported")
        # Copy kernel, stride, and pads values
        k = onnx.helper.make_attribute("kernel_shape", mpp.kernel.dim)
        s = onnx.helper.make_attribute("strides", mpp.stride.dim)
        p = onnx.helper.make_attribute("pads", mpp.pad.dim[:] * 2)
        n.attribute.extend([k, s, p])
        nl.append(n)
    elif func.type == "Convolution":
        cp = func.convolution_param
        # Calculate the kernel_shape from input weight data.
        # Weight data should be the second input for convolution
        if len(func.input) < 2:
            raise ValueError(
                "Weight input is missing for convolution {}".format(func.name))
        weight = func.input[1]
        weight_var = [v for v in variables if v.name == weight]
        if len(weight_var) != 1:
            raise ValueError(
                "No weight input was found, or multiple weight inputs were found"
                " for convolution {} where there should be only one.".format(
                    func.name))
        weight_shape = weight_var[0].shape
        # The base axis for weights is the next axis from the data's base axis
        weight_base = cp.base_axis + 1
        k = onnx.helper.make_attribute("kernel_shape",
                                       weight_shape.dim[weight_base:])
        d = onnx.helper.make_attribute("dilations", cp.dilation.dim)
        s = onnx.helper.make_attribute("strides", cp.stride.dim)
        p = onnx.helper.make_attribute("pads", cp.pad.dim[:] * 2)
        g = onnx.helper.make_attribute("group", cp.group)
        n.attribute.extend([k, d, s, p, g])
        nl.append(n)
    elif func.type == "GlobalAveragePooling":
        # We wipe out the node name to avoid a bug that occurs
        # when a GlobalAveragePooling node has a name containing
        # "Conv" or "Pool".
        # The Caffe2 issue is here:
        # https://github.com/caffe2/caffe2/issues/1971
        # Because a GlobalAveragePooling operator does not contain a kernel,
        # we get an error at the following code if we have a specific name:
        # https://github.com/caffe2/caffe2/blob/master/caffe2/operators/conv_pool_op_base.h#L167
        # The above caffe2 code should be checking the node's operator type,
        # not the node's name.
        n.name = ""
        nl.append(n)
    elif func.type == "Softmax":
        # Softmax on NNabla does softmax ONLY along the specified axis.
        # ONNX first squashes the input dimensions to 2D based on the specified axis,
        # and then calculates the Softmax.
        # Since these two slightly differ, we show a warning here.
        logger.warning(SOFTMAX_WARNING)
        attr = onnx.helper.make_attribute("axis", func.softmax_param.axis)
        n.attribute.extend([attr])
        nl.append(n)
    elif func.type == "AveragePooling":
        app = func.average_pooling_param
        if not app.ignore_border:
            raise ValueError(
                "AveragePooling with ignore_border=False is not supported")
        # Copy kernel, stride, and pads values
        k = onnx.helper.make_attribute("kernel_shape", app.kernel.dim)
        s = onnx.helper.make_attribute("strides", app.stride.dim)
        p = onnx.helper.make_attribute("pads", app.pad.dim[:] * 2)
        n.attribute.extend([k, s, p])
        nl.append(n)
    elif func.type == "BatchNormalization":
        # We need to rearrange the input data order.
        # NNabla BatchNormalization input order: X, beta, gamma, mean, variance
        # ONNX BatchNormalization input order: X, scale, bias, mean, variance
        onnx_order = [0, 2, 1, 3, 4]
        if len(func.input) != len(onnx_order):
            raise ValueError(
                "The number of BatchNormalization input must be {}".format(
                    len(onnx_order)))
        onnx_input = [func.input[i] for i in onnx_order]
        del n.input[:]
        n.input.extend(onnx_input)
        bpp = func.batch_normalization_param
        if bpp.batch_stat:
            # Batch normalization for training is currently not supported
            raise ValueError(
                "BatchNormalization with batch_stat=True is currently not supported for ONNX conversion"
            )
        t = onnx.helper.make_attribute("is_test", not bpp.batch_stat)
        attrs = [t]
        # Set values if a valid value has been set
        if bpp.eps != 0.0:
            e = onnx.helper.make_attribute("epsilon", bpp.eps)
            attrs.append(e)
        if bpp.decay_rate != 0.0:
            m = onnx.helper.make_attribute("momentum", bpp.decay_rate)
            attrs.append(m)
        n.attribute.extend(attrs)
        nl.append(n)
    elif func.type == "Reshape":
        # Convert Reshape size to a constant
        rp = func.reshape_param
        x = func.input[0]
        c_out = x + "_shape"
        c = generate_constant(c_out, func.name + "_shape", TensorProto.INT32,
                              [len(rp.shape.dim)], rp.shape.dim)
        nl.append(c)
        # Add the reshape target shape as the second input
        del n.input[:]
        n.input.extend([x, c_out])
        nl.append(n)
    elif func.type == "Transpose":
        tp = func.transpose_param
        p = onnx.helper.make_attribute("perm", tp.axes)
        n.attribute.extend([p])
        nl.append(n)
    elif func.type == "Affine":
        ap = func.affine_param
        # Broadcast tensor C by default since it's usually a 1D vector
        b = onnx.helper.make_attribute("broadcast", 1)
        n.attribute.extend([b])
        # We need to flatten tensor A to 2D based on the base_axis
        x = func.input[0]
        flout = x + "_flatten"
        fl = onnx.helper.make_node("Flatten", [x], [flout])
        n.input[0] = flout  # rewire input data
        a = onnx.helper.make_attribute("axis", ap.base_axis)
        fl.attribute.extend([a])
        nl.append(fl)
        nl.append(n)
    elif func.type == "BatchMatmul":
        bmp = func.batch_matmul_param
        if bmp.transpose_a or bmp.transpose_b:
            raise ValueError("{} with transpose is not supported yet".format(
                func.type))
        nl.append(n)
    elif func.type == "LeakyReLU":
        lrp = func.leaky_relu_param
        a = onnx.helper.make_attribute("alpha", lrp.alpha)
        n.attribute.extend([a])
        nl.append(n)
    elif func.type == "ELU":
        ep = func.elu_param
        a = onnx.helper.make_attribute("alpha", ep.alpha)
        n.attribute.extend([a])
        nl.append(n)
    elif func.type == "LogicalNot":
        # Store the input/output tensor's name and convert it to boolean
        input_types[n.input[0]] = TensorProto.BOOL
        output_types[n.output[0]] = TensorProto.BOOL
        nl.append(n)
    elif func.type == "SELU":
        sp = func.selu_param
        a = onnx.helper.make_attribute("alpha", sp.alpha)
        g = onnx.helper.make_attribute("gamma", sp.scale)
        n.attribute.extend([a, g])
        nl.append(n)
    elif func.type == "Sum":
        sp = func.sum_param
        set_reduction_attrs(n, sp)
        nl.append(n)
    elif func.type == "Mean":
        mp = func.mean_param
        set_reduction_attrs(n, mp)
        nl.append(n)
    elif func.type == "Max":
        mp = func.max_param
        set_reduction_attrs(n, mp)
        nl.append(n)
    elif func.type == "Min":
        mp = func.min_param
        set_reduction_attrs(n, mp)
        nl.append(n)
    elif func.type == "Prod":
        pp = func.prod_param
        set_reduction_attrs(n, pp)
        nl.append(n)
    elif func.type == "BroadcastTo":
        # BroadcastTo conversion only works when the
        # broadcasted buffer is used as second input for the following:
        # Add, And, Div, Equal, Greater,
        # Less, Mul, Or, Pow, Sub, Xor
        bp = func.broadcast_to_param
        broadcast_target[func.output[0]] = (func.input[1], bp.axis)
        # we do not append node here because BroadcastTo should disappear
    elif (func.type == "Add2" or func.type == "Mul2" or func.type == "Div2"
          or func.type == "Pow2" or func.type == "Sub2"):
        # Check if the second input is a broadcast target.
        bt = func.input[1]
        if bt in broadcast_target:
            merge_broadcast(n, func, bt, broadcast_target)
        nl.append(n)
    elif (func.type == "LogicalAnd" or func.type == "LogicalOr"
          or func.type == "LogicalXor"):
        # Store the input/output tensor's name and convert it to boolean
        input_types[n.input[0]] = TensorProto.BOOL
        output_types[n.output[0]] = TensorProto.BOOL
        # Check if the second input is a broadcast target.
        bt = func.input[1]
        if bt in broadcast_target:
            merged = merge_broadcast(n, func, bt, broadcast_target)
            # Set the merged parameter name as BOOL
            input_types[merged] = TensorProto.BOOL
        else:
            # Set the given parameter name as BOOL
            input_types[n.input[1]] = TensorProto.BOOL
        nl.append(n)
    elif (func.type == "Less" or func.type == "Greater"):
        # Store the output tensor's name and convert it to boolean
        output_types[n.output[0]] = TensorProto.BOOL
        # Check if the second input is a broadcast target.
        bt = func.input[1]
        if bt in broadcast_target:
            merge_broadcast(n, func, bt, broadcast_target)
        nl.append(n)
    elif func.type == "Equal":
        # Get the input data's type.
        # Since ONNX only accepts bool,int32,int64
        # while NNabla does not expose its data type,
        # we default to int64 for now.
        # TODO: Get the correct type information from NNP
        intype = TensorProto.INT64
        # Store the input/output tensor's name and convert it to boolean
        input_types[n.input[0]] = intype
        output_types[n.output[0]] = TensorProto.BOOL
        # Check if the second input is a broadcast target.
        bt = func.input[1]
        if bt in broadcast_target:
            merged = merge_broadcast(n, func, bt, broadcast_target)
            # Set the merged parameter name as BOOL
            input_types[merged] = intype
        else:
            # Set the given parameter name as BOOL
            input_types[n.input[1]] = intype
        nl.append(n)
    elif func.type == "RDivScalar":
        rp = func.r_div_scalar_param
        if rp.val != 1.0:
            raise ValueError(
                "RDivScalar can be converted to Reciprocal only if val is 1")
        nl.append(n)
    elif func.type == "MulScalar":
        mp = func.mul_scalar_param
        if mp.val == -1.0:
            # Convert to Neg
            n.op_type = "Neg"
        else:
            # Convert the scalar param to a Const node and add it with input
            x = func.input[0]
            sval = x + "_scalar"
            c = generate_scalar_constant(sval, func.name + "_scalar", mp.val)
            del n.input[:]
            n.input.extend([x, sval])
            nl.append(c)
            # set broadcast to true
            b = onnx.helper.make_attribute("broadcast", 1)
            n.attribute.extend([b])
        nl.append(n)
    elif func.type == "MinimumScalar":
        msp = func.minimum_scalar_param
        m = onnx.helper.make_attribute("max", msp.val)
        n.attribute.extend([m])
        nl.append(n)
    elif func.type == "MaximumScalar":
        msp = func.maximum_scalar_param
        m = onnx.helper.make_attribute("min", msp.val)
        n.attribute.extend([m])
        nl.append(n)
    elif func.type == "AddScalar":
        asp = func.add_scalar_param
        # Convert the scalar param to a Const node and add it with input
        x = func.input[0]
        sval = x + "_scalar"
        c = generate_scalar_constant(sval, func.name + "_scalar", asp.val)
        nl.append(c)
        del n.input[:]
        n.input.extend([x, sval])
        # set broadcast to true
        b = onnx.helper.make_attribute("broadcast", 1)
        n.attribute.extend([b])
        nl.append(n)
    elif func.type == "PowScalar":
        psp = func.pow_scalar_param
        # Convert the scalar param to a Const node and add it with input
        x = func.input[0]
        sval = x + "_scalar"
        c = generate_scalar_constant(sval, func.name + "_scalar", psp.val)
        nl.append(c)
        del n.input[:]
        n.input.extend([x, sval])
        # set broadcast to true
        b = onnx.helper.make_attribute("broadcast", 1)
        n.attribute.extend([b])
        nl.append(n)
    elif func.type == "SumPooling":
        # SumPooling gets converted to AveragePooling+Mul.
        # Mul is used to counter the division in AveragePooling
        # since SumPooling is just summing the values in each kernel.
        # Copy kernel, stride, and pads values
        spp = func.sum_pooling_param
        if not spp.ignore_border:
            raise ValueError("SumPooling with ignore_border=False"
                             " is not supported")
        attrs = {
            "kernel_shape": spp.kernel.dim,
            "strides": spp.stride.dim,
            "pads": spp.pad.dim[:] * 2
        }
        apin = func.input[0]
        apout = apin + "_ap"
        ap = onnx.helper.make_node("AveragePool", [apin], [apout], **attrs)
        nl.append(ap)
        # Counter the averaging process by multiplying kernel size
        kernel_size = np.prod(spp.kernel.dim)
        mulout = apin + "_kernel"
        c = generate_scalar_constant(mulout, func.name + "_kernel",
                                     kernel_size)
        nl.append(c)
        # Rewire Mul with average pooling output
        del n.input[:]
        n.input.extend([apout, mulout])
        # set broadcast to true
        b = onnx.helper.make_attribute("broadcast", 1)
        n.attribute.extend([b])
        nl.append(n)
    elif func.type == "Pad":
        pp = func.pad_param
        mode_conv = {
            "constant": "constant",
            "replicate": "edge",
            "reflect": "reflect"
        }
        # separate pad values to match ONNX format
        # (S0,E0,S1,E1) => (S0,S1,E0,E1)
        dim = len(pp.pad_width) // 2
        # If we can get the dimension of the input buffer,
        # we get it here. If we cannot, we are assuming 4D input
        in_name = func.input[0]
        in_var = [v for v in variables if v.name == in_name]
        in_dim = 4
        if len(in_var) == 1 and len(in_var[0].shape.dim) > 0:
            # Found variable with valid shape.
            # If the shape dimension is zero, it means
            # that is an intermediate buffer so we can't get
            # the exact dimension at this point
            # (thus assuming 4D input).
            in_dim = len(in_var[0].shape.dim)
        elif len(in_var) > 1:
            raise ValueError("More than one buffer with"
                             " the same buffer name found.")
        zero_dim_num = in_dim - dim
        it = iter(pp.pad_width)
        # We need to fill empty dimensions with zero padding
        # (at least this is what Caffe2 expects)
        starts = [0] * zero_dim_num
        ends = [0] * zero_dim_num
        for x in it:
            starts.append(x)
            ends.append(next(it))
        starts.extend(ends)
        pad = onnx.helper.make_attribute("pads", starts)
        m = onnx.helper.make_attribute("mode", mode_conv[pp.mode])
        v = onnx.helper.make_attribute("value", pp.constant_value)
        n.attribute.extend([pad, m, v])
        nl.append(n)
    else:
        # Simply append node to list
        nl.append(n)
    return nl
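The pad-width reordering in the "Pad" branch is the subtlest step: NNabla interleaves (start, end) per dimension while ONNX wants all starts followed by all ends. The same transformation in isolation:

pad_width = [1, 2, 3, 4]  # NNabla order: (S0, E0, S1, E1)
it = iter(pad_width)
starts, ends = [], []
for s in it:
    starts.append(s)
    ends.append(next(it))
print(starts + ends)      # ONNX order: [1, 3, 2, 4]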