Code Example #1
File: core.py Project: balarsen/pysysdevel
def _check_append_library(libraries, item):
    for libitem in libraries:
        if util.is_sequence(libitem):
            if util.is_sequence(item):
                if item[0]==libitem[0]:
                    if item[1] is libitem[1]:
                        return
                    warnings.warn("[0] libraries list contains %r with"
                                  " different build_info" % (item[0],))
                    break
            else:
                if item==libitem[0]:
                    warnings.warn("[1] libraries list contains %r with"
                                  " no build_info" % (item[0],))
                    break
        else:
            if util.is_sequence(item):
                if item[0]==libitem:
                    warnings.warn("[2] libraries list contains %r with"
                                  " no build_info" % (item[0],))
                    break
            else:
                if item==libitem:
                    return
    libraries.append(item)
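A minimal usage sketch of the helper above; the list contents and build_info dicts are hypothetical, and `util`/`warnings` come from the surrounding module:

# Hypothetical illustration of the de-duplication behaviour.
libs = [('foo', {'sources': ['foo.c']}), 'bar']
_check_append_library(libs, ('foo', {'sources': ['foo.c']}))  # same name, different build_info object: warns, then appends
_check_append_library(libs, 'bar')                            # exact match on a plain name: returns without appending
_check_append_library(libs, 'baz')                            # unseen name: appended as-is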
Code Example #2
 def assign_properties(node):
     properties = stylesheet.get().node_properties(node, self)
     style = node.attr('style')
     if style:
         if default_stylesheet_type.get() == 'text/css':
             stream = StringIO.StringIO(style)
             declarations = css.parse_stream(
                 stream, grammar=css.properties_grammar)
             if util.is_sequence(declarations):
                 for d in declarations:
                     properties[d.name] = d.value
             else:
                 errors.append(
                     Stylesheet_Parse_Error(
                         description="Parse error in stylesheet",
                         data='inline'))
         elif not default_stylesheet_type:
             errors.append(
                 No_Stylesheet_Type_Error(
                     node=node,
                     description=
                     "Style without stylesheet type defined",
                     data=style))
         else:
             errors.append(
                 Unknown_Stylesheet_Type_Error(
                     node=node,
                     description="Unknown stylesheet type",
                     data=style))
     node.set_style(properties)
Code Example #3
 def test_is_sequence(self):
     self.longMessage = True
     self.assertTrue(util.is_sequence((1, 2, 3)), 'sequence not a sequence')
     self.assertTrue(util.is_sequence(()), 'sequence not a sequence')
     self.assertTrue(util.is_sequence([]), 'sequence not a sequence')
     self.assertTrue(util.is_sequence([1]), 'sequence not a sequence')
     self.assertFalse(util.is_sequence(None), 'non-sequence as sequence')
     self.assertFalse(util.is_sequence(self), 'non-sequence as sequence')
     self.assertFalse(util.is_sequence({}), 'non-sequence as sequence')
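The test above pins down the expected contract. A minimal sketch of an `is_sequence` that satisfies these assertions (an illustration only, not the actual implementation shipped by Gamera or numpy.distutils):

def is_sequence(obj):
    # Lists and tuples count as sequences; None, dicts and arbitrary objects do
    # not, matching the assertions above. Strings are also excluded here, which
    # the other examples on this page rely on.
    return isinstance(obj, (list, tuple))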
Code Example #4
File: core.py Project: playingaround2017/test123
def display_multi(list):
    """**display_multi** (ImageList *list*)

Displays a list of images in a grid-like window.  This function has
no effect if the `Gamera GUI`__ is not running.

.. __: gui.html"""
    # If it's not a list, we'll get errors, so make it one
    if not util.is_sequence(list):
        list = [list]
    if not len(list):
        raise ValueError("Given list is empty.")
    return has_gui.gui.ShowImages(list)
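A hypothetical call pattern, relying only on the wrapping behaviour shown above (`some_image`, `img_a`, `img_b` are placeholder Gamera images, and the GUI is assumed to be running):

display_multi(some_image)        # a single image is wrapped into [some_image] before display
display_multi([img_a, img_b])    # shown together in the grid-like window
display_multi([])                # raises ValueError: Given list is empty.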
Code Example #5
File: location.py Project: brailcom/wachecker
 def _parse_mime_type(self, mime_type):
     default_mime_type = (
         None,
         None,
     )
     s_mime_type = S.make(mime_type)
     if s_mime_type:
         mime_type = string.split(s_mime_type, '/')
     elif not util.is_sequence(mime_type):
         mime_type = default_mime_type
     if len(mime_type) != 2:
         mime_type = default_mime_type
     return tuple([str(s) for s in mime_type])
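Assuming `S.make` simply returns the string form of a string argument (its definition is not shown here), a sketch of what the method yields:

# Hypothetical results:
#   self._parse_mime_type('text/html')  -> ('text', 'html')
#   self._parse_mime_type('nonsense')   -> ('None', 'None')   # wrong shape falls back to the (None, None)
#                                                             # default, which str() then renders as 'None'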
Code Example #6
File: args.py Project: elaboris/gamera
 def __init__(self, name=None, range=(-DEFAULT_MAX_ARG_NUMBER, DEFAULT_MAX_ARG_NUMBER), default=None):
    Arg.__init__(self, name)
    if not (util.is_sequence(range) and len(range) == 2 and
            type(range[0]) in (int, float) and type(range[1]) in (int, float)):
       raise TypeError("'range' must be a 2-tuple of numbers")
    self.rng = range
    if default is None:
       self.has_default = False
       self.default = 0
    else:
       self.has_default = True
       self.default = default
    if not isinstance(self.default,CNoneDefault) and type(self.default) != int:
       raise TypeError("'default' must be an int")
Code Example #7
File: args.py Project: playingaround2017/test123
 def __init__(self, name=None, range=None, default=None):
     Arg.__init__(self, name)
     if range is None:
         range = (-DEFAULT_MAX_ARG_NUMBER, DEFAULT_MAX_ARG_NUMBER)
     if not (util.is_sequence(range) and len(range) == 2 and type(range[0])
             in (int, float) and type(range[1]) in (int, float)):
         raise TypeError("'range' must be a 2-tuple of numbers")
     self.rng = range
     if default is None:
         self.has_default = False
         self.default = 0.0
     else:
         self.has_default = True
         self.default = default
     if type(self.default) != float:
         raise TypeError("'default' must be a float")
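A hedged usage sketch covering both constructors above; the class names Int and Float are assumed from Gamera's args module, and only the validation shown in the snippets is exercised:

Int('threshold', range=(0, 255), default=128)   # accepted: 2-tuple of numbers, int default
Float('scale', default=1.5)                     # accepted: falls back to the default range
Int('bad', range=(0, 10, 20))                   # raises TypeError: 'range' must be a 2-tuple of numbers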
Code Example #8
File: api.py Project: acdpnk/appdotnet
def _params(param_dict=None, collapse=False):
    """ Given a dict of parameter key/value pairs, filter out the ones
    whose values are None, and comma-join any parameters with lists/tuples.

    :param dict param_dict: the full set of parameters
    :param boolean collapse: if True, collapses lists/tuples to comma-separated
        lists
    :returns: (dict) the refined set of params

    """
    param_dict = param_dict or {}
    params = dict(filter(lambda (k, v): v is not None, param_dict.iteritems()))

    if collapse:
        for key in params:
            if is_sequence(params[key]):
                params[key] = ','.join(params[key])

    return params
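A usage sketch of `_params` (parameter names are hypothetical; note the snippet is Python 2, hence the tuple-unpacking lambda and iteritems):

# None-valued entries are dropped; with collapse=True, list/tuple values are comma-joined.
_params({'count': '20', 'since_id': None, 'types': ('post', 'star')}, collapse=True)
# -> {'count': '20', 'types': 'post,star'}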
Code Example #9
File: api.py Project: 0x1cedd1ce/appdotnet
def _params(param_dict=None, collapse=False):
    """ Given a dict of parameter key/value pairs, filter out the ones
    whose values are None, and comma-join any parameters with lists/tuples.

    :param dict param_dict: the full set of parameters
    :param boolean collapse: if True, collapses lists/tuples to comma-separated
        lists
    :returns: (dict) the refined set of params

    """
    param_dict = param_dict or {}
    params = dict(filter(lambda (k, v): v is not None, param_dict.iteritems()))

    if collapse:
        for key in params:
            if is_sequence(params[key]):
                params[key] = ','.join(params[key])

    return params
Code Example #10
File: args.py Project: elaboris/gamera
 def __init__(self, pixel_types, name=None, list_of=False, default=None):
    import core
    Arg.__init__(self, name)
    if not util.is_sequence(pixel_types):
       pixel_types = (pixel_types,)
    if not util.is_homogenous_list(pixel_types, (int,)):
       raise TypeError("'pixel_types' must be a list of integers.")
    if not core is None:
       self.klass = core.ImageBase
    else:
       self.klass = None
    self.pixel_types = pixel_types
    self.list_of = bool(list_of)
    if default is None:
       self.has_default = False
       self.default = None
    else:
       self.has_default = True
       self.default = default
    if self.has_default and not isinstance(self.default,CNoneDefault):
       raise TypeError("'default' can only be NoneDefault")
Code Example #11
File: args.py Project: playingaround2017/test123
 def __init__(self, pixel_types, name=None, list_of=False, default=None):
     import core
     Arg.__init__(self, name)
     if not util.is_sequence(pixel_types):
         pixel_types = (pixel_types, )
     if not util.is_homogenous_list(pixel_types, (int, )):
         raise TypeError("'pixel_types' must be a list of integers.")
     if not core is None:
         self.klass = core.ImageBase
     else:
         self.klass = None
     self.pixel_types = pixel_types
     self.list_of = bool(list_of)
     if default is None:
         self.has_default = False
         self.default = None
     else:
         self.has_default = True
         self.default = default
     if self.has_default and not isinstance(self.default, CNoneDefault):
         raise TypeError("'default' can only be NoneDefault")
Code Example #12
File: pyplate.py Project: alan0526/Gamera
class ForTemplateNode(TemplateNode):
    def __init__(self, parent, s):
        TemplateNode.__init__(self, parent, s)
        match = re_for_loop.match(s)
        if match == None:
            raise self.parent.parser_exception(
                "[[%s]] is not a valid for-loop expression" % self.s)
        else:
            self.vars_temp = match.group(1).split(",")
            self.vars = []
            for v in self.vars_temp:
                self.vars.append(v.strip())
            self.expression = match.group(2)

    def execute(self, stream, data):
        remember_vars = {}
        for var in self.vars:
            if data.has_key(var):
                remember_vars[var] = data[var]
        try:
            x = eval(self.expression, globals(), data)
        except Exception, e:
            self.parent.parser_exception(self.expression, e)
        for list in x:
            if util.is_sequence(list):
                if len(self.vars) == 1:
                    data[self.vars[0]] = list
                elif len(self.vars) == len(list):
                    for index, value in util.enumerate(list):
                        data[self.vars[index]] = value
                else:
                    self.parent.parser_exception(
                        "Unable to unpack tuples in [[%s]]" % self.s)
            else:
                data[self.vars[0]] = list
            TemplateNode.execute(self, stream, data)
        for key, value in remember_vars.items():
            data[key] = value
Code Example #13
File: build_shlib.py Project: balarsen/pysysdevel
    def build_a_library(self, build_info, lib_name, libraries):
        libraries = convert_ulist(build_info.get("libraries") or [])
        library_dirs = convert_ulist(build_info.get("library_dirs") or [])
        runtime_library_dirs = convert_ulist(build_info.get("runtime_library_dirs"))
        extra_preargs = build_info.get("extra_compiler_args") or []
        extra_postargs = build_info.get("extra_link_args") or []

        ## Conditional recompile
        target_library = self.compiler.library_filename(lib_name, lib_type="shared", output_dir="")
        target_path = os.path.join(self.build_clib, target_library)
        recompile = False
        if not os.path.exists(target_path) or self.force:
            recompile = True
        else:
            for src in build_info.get("sources"):
                if os.path.getmtime(target_path) < os.path.getmtime(src):
                    recompile = True
                    break
        if not recompile:
            return

        library_dirs += [self.build_clib]

        ########################################
        ## Copied verbatim from numpy.distutils.command.build_clib

        # default compilers
        compiler = self.compiler
        fcompiler = getattr(self, "_f_compiler", self.fcompiler)

        sources = build_info.get("sources")
        if sources is None or not is_sequence(sources):
            raise DistutilsSetupError, (
                "in 'libraries' option (library '%s'), "
                + "'sources' must be present and must be "
                + "a list of source filenames"
            ) % lib_name
        sources = list(sources)

        c_sources, cxx_sources, f_sources, fmodule_sources = filter_sources(sources)
        requiref90 = not not fmodule_sources or build_info.get("language", "c") == "f90"

        # save source type information so that build_ext can use it.
        source_languages = []
        if c_sources:
            source_languages.append("c")
        if cxx_sources:
            source_languages.append("c++")
        if requiref90:
            source_languages.append("f90")
        elif f_sources:
            source_languages.append("f77")

        specified = build_info.get("language", None)
        if specified:
            source_languages.append(specified)
            if specified == "c++":  ## force c++ compiler
                cxx_sources += c_sources
                c_sources = []
        build_info["source_languages"] = source_languages

        lib_file = compiler.library_filename(lib_name, lib_type="shared", output_dir=self.build_clib)
        depends = sources + build_info.get("depends", [])
        if not (self.force or newer_group(depends, lib_file, "newer")):
            log.debug("skipping '%s' library (up-to-date)", lib_name)
            return
        else:
            log.info("building '%s' library", lib_name)

        if have_numpy:
            config_fc = build_info.get("config_fc", {})
            if fcompiler is not None and config_fc:
                log.info("using additional config_fc from setup script " "for fortran compiler: %s" % (config_fc,))
                from numpy.distutils.fcompiler import new_fcompiler

                fcompiler = new_fcompiler(
                    compiler=fcompiler.compiler_type,
                    verbose=self.verbose,
                    dry_run=self.dry_run,
                    force=self.force,
                    requiref90=requiref90,
                    c_compiler=self.compiler,
                )
                if fcompiler is not None:
                    dist = self.distribution
                    base_config_fc = dist.get_option_dict("config_fc").copy()
                    base_config_fc.update(config_fc)
                    fcompiler.customize(base_config_fc)

            # check availability of Fortran compilers
            if (f_sources or fmodule_sources) and fcompiler is None:
                ver = "77"
                if requiref90:
                    ver = "90"
                raise DistutilsError, "library %s has Fortran%s sources" " but no Fortran compiler found" % (
                    lib_name,
                    ver,
                )
            """
            if fcompiler is not None:
                fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or []
                fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or []
                """

        macros = build_info.get("macros")
        include_dirs = build_info.get("include_dirs")
        if include_dirs is None:
            include_dirs = []
        extra_postargs = build_info.get("extra_compiler_args") or []

        if have_numpy:
            include_dirs.extend(get_numpy_include_dirs())
        # where compiled F90 module files are:
        module_dirs = build_info.get("module_dirs") or []
        module_build_dir = os.path.dirname(lib_file)
        if requiref90:
            self.mkpath(module_build_dir)

        if compiler.compiler_type == "msvc":
            # this hack works around the msvc compiler attributes
            # problem, msvc uses its own convention :(
            c_sources += cxx_sources
            cxx_sources = []

        objects = []
        if c_sources:
            log.info("compiling C sources")
            objects = compiler.compile(
                c_sources,
                output_dir=self.build_temp,
                macros=macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=extra_postargs,
            )

        if cxx_sources:
            log.info("compiling C++ sources")
            cxx_compiler = compiler.cxx_compiler()
            cxx_objects = cxx_compiler.compile(
                cxx_sources,
                output_dir=self.build_temp,
                macros=macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=extra_postargs,
            )
            objects.extend(cxx_objects)

        if f_sources or fmodule_sources:
            if not have_numpy:
                raise Exception("Fortran sources, but no NumPy to compile them.")
            extra_postargs = []
            f_objects = []

            if requiref90:
                if fcompiler.module_dir_switch is None:
                    existing_modules = glob("*.mod")
                extra_postargs += fcompiler.module_options(module_dirs, module_build_dir)

            if fmodule_sources:
                log.info("compiling Fortran 90 module sources")
                f_objects += fcompiler.compile(
                    fmodule_sources,
                    output_dir=self.build_temp,
                    macros=macros,
                    include_dirs=include_dirs,
                    debug=self.debug,
                    extra_postargs=extra_postargs,
                )

            if requiref90 and fcompiler.module_dir_switch is None:
                # move new compiled F90 module files to module_build_dir
                for f in glob("*.mod"):
                    if f in existing_modules:
                        continue
                    t = os.path.join(module_build_dir, f)
                    if os.path.abspath(f) == os.path.abspath(t):
                        continue
                    if os.path.isfile(t):
                        os.remove(t)
                    try:
                        self.move_file(f, module_build_dir)
                    except DistutilsFileError:
                        log.warn("failed to move %r to %r" % (f, module_build_dir))

            if f_sources:
                log.info("compiling Fortran sources")
                f_objects += fcompiler.compile(
                    f_sources,
                    output_dir=self.build_temp,
                    macros=macros,
                    include_dirs=include_dirs,
                    debug=self.debug,
                    extra_postargs=extra_postargs,
                )
        else:
            f_objects = []

        objects.extend(f_objects)

        # assume that default linker is suitable for
        # linking Fortran object files
        ########################################

        link_compiler = compiler
        if cxx_sources:
            link_compiler = cxx_compiler
        extra_postargs = build_info.get("extra_link_args") or []

        ## May be dependent on other libs we're building
        shlib_libraries = []
        for libinfo in build_info.get("libraries", []):
            if isinstance(libinfo, basestring):
                shlib_libraries.append(convert_ulist([libinfo])[0])
            else:
                shlib_libraries.append(libinfo[0])

        ## Alternate ending
        link_compiler.link(
            target_desc=link_compiler.SHARED_LIBRARY,
            objects=objects,
            output_filename=target_library,
            output_dir=self.build_clib,
            libraries=shlib_libraries,
            library_dirs=library_dirs,
            runtime_library_dirs=runtime_library_dirs,
            debug=self.debug,
            extra_preargs=extra_preargs,
            extra_postargs=extra_postargs,
        )
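The build_info dict read throughout this method follows the numpy.distutils `libraries` convention. A hedged sketch of one entry it could consume; the keys are the ones the code above actually reads, the values are hypothetical:

('mylib', {
    'sources': ['src/a.c', 'src/b.cpp', 'src/legacy.f'],          # required and must be a sequence
    'libraries': ['m', ('otherlib', {'sources': ['other.c']})],   # names or (name, build_info) pairs
    'include_dirs': ['include'],
    'macros': [('USE_FEATURE', 1)],
    'extra_compiler_args': ['-O2'],
    'extra_link_args': [],
    'language': 'c++',                                            # optional; 'f90' forces the Fortran 90 path
    'depends': ['include/a.h'],
})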
Code Example #14
def function(*args, **kwargs):
    assert 'size' in kwargs, "Need size information"
    assert 'name' in kwargs, "Need name for output"

    assert len(args) > 0, "Empty arguments in function {}".format(
        kwargs["name"])

    size = kwargs["size"]
    name = kwargs["name"]

    config = kwargs.get("config", {})
    for k, v in config.iteritems():
        if not k in kwargs:
            kwargs[k] = v
    user_act = kwargs.get("act")
    use_bias = kwargs.get("use_bias", True)
    weight_factor = kwargs.get("weight_factor", 1.0)
    use_weight_norm = kwargs.get("use_weight_norm", False)
    layers_num = kwargs.get("layers_num")
    reuse = kwargs.get("reuse", False)
    use_batch_norm = kwargs.get("use_batch_norm", False)
    scope_name = kwargs.get("scope_name", "")
    if scope_name:
        name = "{}/{}".format(scope_name, name)
    if use_weight_norm:
        use_bias = False

    epsilon = 1e-03

    if not is_sequence(size):
        size = (size, )
        if not layers_num is None:
            size = size * layers_num

    if layers_num is None:
        layers_num = len(size)
    else:
        assert layers_num == len(
            size
        ), "Got layers num not matched with size information. layers_num: {}, size: {}".format(
            layers_num, size)

    act = None
    if user_act:
        act = user_act

    assert not act is None or use_weight_norm == False, "Can't use batch normalization with linear activation function"

    with tf.variable_scope(name, reuse=reuse) as scope:
        inputs = args

        for l_id in xrange(layers_num):
            nout = size[l_id]
            layer_out = None  #tf.zeros([None, nout], dtype=tf.float32)

            for idx, a in enumerate(inputs):
                a_shape = a.get_shape().as_list()

                nin = a_shape[-1]

                init = lambda shape, dtype, partition_info: xavier_init(
                    nin, nout, const=weight_factor)
                vec_init = lambda shape, dtype, partition_info: xavier_vec_init(
                    nout, const=weight_factor)
                zeros_init = lambda shape, dtype, partition_info: np.zeros(
                    (nout, ))
                ones_init = lambda shape, dtype, partition_info: np.ones(
                    (nout, ))

                if not use_weight_norm:
                    w = tf.get_variable("W{}-{}".format(l_id, idx),
                                        [nin, nout],
                                        dtype=tf.float32,
                                        initializer=init)
                    a_w = tf.matmul(a, w)
                else:
                    V = tf.get_variable(
                        "V{}-{}".format(l_id, idx), [nin, nout],
                        dtype=tf.float32,
                        initializer=init
                    )  #tf.uniform_unit_scaling_initializer(factor=weight_factor))
                    g = tf.get_variable("g{}-{}".format(l_id, idx), [nout],
                                        dtype=tf.float32,
                                        initializer=vec_init)

                    a_w = tf.matmul(a, V)
                    a_w = a_w * g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))

                if use_bias:
                    b = tf.get_variable("b{}-{}".format(l_id, idx), [nout],
                                        tf.float32,
                                        initializer=zeros_init)
                    a_w = a_w + b

                if layer_out is None:
                    layer_out = a_w
                else:
                    layer_out = layer_out + a_w

            if use_batch_norm:
                batch_mean, batch_var = tf.nn.moments(layer_out, [0])
                layer_out = (layer_out - batch_mean) / tf.sqrt(batch_var +
                                                               epsilon)

                gamma = tf.get_variable("gamma{}".format(l_id), [nout],
                                        dtype=tf.float32,
                                        initializer=ones_init)
                beta = tf.get_variable("beta{}".format(l_id), [nout],
                                       dtype=tf.float32,
                                       initializer=zeros_init)

                layer_out = gamma * layer_out + beta

            inputs = (act(layer_out) if act else layer_out, )

    return inputs[0]
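A hedged call sketch for the helper above (TF1-era API; `x` stands for a hypothetical 2-D float32 tensor already defined in the graph):

# Two fully connected layers over x: 256 units then 64, ReLU activations.
y = function(x, size=(256, 64), name="encoder", act=tf.nn.relu)
# A scalar size is replicated layers_num times: two layers of 128 units, with batch norm.
y2 = function(x, size=128, layers_num=2, name="encoder2", act=tf.nn.relu, use_batch_norm=True)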
Code Example #15
File: core.py Project: balarsen/pysysdevel

def _check_append_ext_library(libraries, (lib_name,build_info)):
    for item in libraries:
        if util.is_sequence(item):
            if item[0]==lib_name:
                if item[1] is build_info:
                    return
                warnings.warn("[3] libraries list contains %r with"
                              " different build_info" % (lib_name,))
                break
        elif item==lib_name:
            warnings.warn("[4] libraries list contains %r with"
                          " no build_info" % (lib_name,))
            break
    libraries.append((lib_name,build_info))
Code Example #16
File: function.py Project: alexeyche/alexeyche-junk
def function(*args, **kwargs):
    assert 'size' in kwargs, "Need size information"
    assert 'name' in kwargs, "Need name for output"

    assert len(args) > 0, "Empty arguments in function {}".format(kwargs["name"])

    size = kwargs["size"]
    name = kwargs["name"]


    config = kwargs.get("config", {})
    for k, v in config.iteritems():
        if not k in kwargs:
            kwargs[k] = v
    user_act = kwargs.get("act")
    use_bias = kwargs.get("use_bias", True)
    weight_factor = kwargs.get("weight_factor", 1.0)
    use_weight_norm = kwargs.get("use_weight_norm", False)
    layers_num = kwargs.get("layers_num")
    reuse = kwargs.get("reuse", False)
    use_batch_norm = kwargs.get("use_batch_norm", False)
    scope_name = kwargs.get("scope_name", "")
    if scope_name:
        name = "{}/{}".format(scope_name, name)
    if use_weight_norm:
        use_bias = False
    
    epsilon = 1e-03

    if not is_sequence(size):
        size = (size,)
        if not layers_num is None:
            size = size*layers_num

    if layers_num is None:
        layers_num = len(size)
    else:
        assert layers_num == len(size), "Got layers num not matched with size information. layers_num: {}, size: {}".format(layers_num, size)
    

    act = None
    if user_act:
        act = user_act
    
    assert not act is None or use_weight_norm == False, "Can't use batch normalization with linear activation function"

    with tf.variable_scope(name, reuse=reuse) as scope:
        inputs = args

        for l_id in xrange(layers_num):
            nout = size[l_id]
            layer_out = None #tf.zeros([None, nout], dtype=tf.float32)

            for idx, a in enumerate(inputs):
                a_shape = a.get_shape().as_list()

                nin = a_shape[-1]

                init = lambda shape, dtype, partition_info: xavier_init(nin, nout, const = weight_factor)
                vec_init = lambda shape, dtype, partition_info: xavier_vec_init(nout, const = weight_factor)
                zeros_init = lambda shape, dtype, partition_info: np.zeros((nout,))
                ones_init = lambda shape, dtype, partition_info: np.ones((nout,))
                
                if not use_weight_norm:
                    w = tf.get_variable("W{}-{}".format(l_id, idx), [nin, nout], dtype = tf.float32, initializer = init)
                    a_w = tf.matmul(a, w)
                else:
                    V = tf.get_variable("V{}-{}".format(l_id, idx), [nin, nout], dtype = tf.float32, initializer = init) #tf.uniform_unit_scaling_initializer(factor=weight_factor))
                    g = tf.get_variable("g{}-{}".format(l_id, idx), [nout], dtype = tf.float32, initializer = vec_init)

                    a_w = tf.matmul(a, V)
                    a_w = a_w * g/tf.sqrt(tf.reduce_sum(tf.square(V),[0]))

                if use_bias:
                    b = tf.get_variable("b{}-{}".format(l_id, idx), [nout], tf.float32, initializer = zeros_init)
                    a_w = a_w + b

                if layer_out is None:
                    layer_out = a_w
                else:
                    layer_out = layer_out + a_w
            
            if use_batch_norm:
                batch_mean, batch_var = tf.nn.moments(layer_out, [0])
                layer_out = (layer_out - batch_mean) / tf.sqrt(batch_var + epsilon)

                gamma = tf.get_variable("gamma{}".format(l_id), [nout], dtype = tf.float32, initializer = ones_init)
                beta = tf.get_variable("beta{}".format(l_id), [nout], dtype = tf.float32, initializer = zeros_init)
                
                layer_out = gamma * layer_out + beta

            inputs = (act(layer_out) if act else layer_out,)

    return inputs[0]
Code Example #17
File: build_exe.py Project: balarsen/pysysdevel
    def run(self):
        if not self.executables:
            return

        # Make sure that library sources are complete.
        languages = []
        for exe in self.executables:
            if not all_strings(exe.sources):
                self.run_command('build_src')
            l = exe.language
            if l and l not in languages: languages.append(l)

        from distutils.ccompiler import new_compiler
        self.compiler = new_compiler(compiler=self.compiler,
                                     dry_run=self.dry_run,
                                     force=self.force)
        self.compiler.customize(self.distribution,
                                need_cxx=self.have_cxx_sources())

        self.compiler.customize_cmd(self)
        self.compiler.show_customization()

        if have_numpy and self.have_f_sources():
            from numpy.distutils.fcompiler import new_fcompiler
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           verbose=self.verbose,
                                           dry_run=self.dry_run,
                                           force=self.force,
                                           requiref90='f90' in languages,
                                           c_compiler=self.compiler)
            if self.fcompiler is not None:
                self.fcompiler.customize(self.distribution)

                self.fcompiler.customize_cmd(self)
                self.fcompiler.show_customization()



        exes = []
        for exe in self.executables:
            libraries = exe.libraries or []
            library_dirs = exe.library_dirs or []
            runtime_library_dirs = exe.runtime_library_dirs or []
            extra_preargs = exe.extra_compile_args or []
            extra_postargs = exe.extra_link_args or []

            ## include libraries built by build_shlib and/or build_clib
            library_dirs.append(self.build_temp)

            ## Conditional recompile
            build_directory = self.build_clib
            target_path = os.path.join(build_directory, exe.name)
            recompile = False
            if not os.path.exists(target_path) or self.force:
                recompile = True
            else:
                for src in exe.sources:
                    if os.path.getmtime(target_path) < os.path.getmtime(src):
                        recompile = True
                        break
            if not recompile:
                return

            ########################################
            ## Copied from numpy.distutils.command.build_clib

            # default compilers
            compiler = self.compiler
            fcompiler = self.fcompiler

            sources = exe.sources
            if sources is None or not is_sequence(sources):
                raise DistutilsSetupError, \
                    ("in 'libraries' option (library '%s'), " +
                     "'sources' must be present and must be " +
                     "a list of source filenames") % exe.name
            sources = list(sources)

            c_sources, cxx_sources, f_sources, fmodule_sources \
                = filter_sources(sources)
            if not exe.language:
                exe.language = 'c'
            requiref90 = not not fmodule_sources or exe.language =='f90'

            # save source type information so that build_ext can use it.
            source_languages = []
            if c_sources: source_languages.append('c')
            if cxx_sources: source_languages.append('c++')
            if requiref90: source_languages.append('f90')
            elif f_sources: source_languages.append('f77')
            exe.source_languages = source_languages

            lib_file = compiler.library_filename(exe.name,
                                                 output_dir=build_directory)
            depends = sources + (exe.depends or [])
            if not (self.force or newer_group(depends, lib_file, 'newer')):
                log.debug("skipping '%s' library (up-to-date)", exe.name)
                return
            else:
                log.info("building '%s' library", exe.name)

            if have_numpy:
                config_fc = exe.config_fc or {}
                if fcompiler is not None and config_fc:
                    log.info('using additional config_fc from setup script '\
                                 'for fortran compiler: %s' \
                                 % (config_fc,))
                    from numpy.distutils.fcompiler import new_fcompiler
                    fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force,
                                              requiref90=requiref90,
                                              c_compiler=self.compiler)
                    if fcompiler is not None:
                        dist = self.distribution
                        base_config_fc = dist.get_option_dict('config_fc').copy()
                        base_config_fc.update(config_fc)
                        fcompiler.customize(base_config_fc)

                # check availability of Fortran compilers
                if (f_sources or fmodule_sources) and fcompiler is None:
                    raise DistutilsError, "library %s has Fortran sources"\
                        " but no Fortran compiler found" % (exe.name)

            macros = exe.define_macros
            include_dirs = exe.include_dirs
            if include_dirs is None:
                include_dirs = []

            if have_numpy:
                include_dirs.extend(get_numpy_include_dirs())
            # where compiled F90 module files are:
            module_dirs = exe.module_dirs or []
            module_build_dir = os.path.dirname(lib_file)
            if requiref90: self.mkpath(module_build_dir)

            if compiler.compiler_type=='msvc':
                # this hack works around the msvc compiler attributes
                # problem, msvc uses its own convention :(
                c_sources += cxx_sources
                cxx_sources = []

            objects = []
            if c_sources:
                log.info("compiling C sources")
                objects = compiler.compile(c_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs)

            if cxx_sources:
                log.info("compiling C++ sources")
                cxx_compiler = compiler.cxx_compiler()
                cxx_objects = cxx_compiler.compile(cxx_sources,
                                                   output_dir=self.build_temp,
                                                   macros=macros,
                                                   include_dirs=include_dirs,
                                                   debug=self.debug,
                                                   extra_postargs=extra_postargs)
                objects.extend(cxx_objects)

            if f_sources or fmodule_sources:
                extra_postargs = []
                f_objects = []

                if requiref90:
                    if fcompiler.module_dir_switch is None:
                        existing_modules = glob('*.mod')
                    extra_postargs += fcompiler.module_options(\
                        module_dirs,module_build_dir)

                if fmodule_sources:
                    log.info("compiling Fortran 90 module sources")
                    f_objects += fcompiler.compile(fmodule_sources,
                                                   output_dir=self.build_temp,
                                                   macros=macros,
                                                   include_dirs=include_dirs,
                                                   debug=self.debug,
                                                   extra_postargs=extra_postargs)

                if requiref90 and self.fcompiler.module_dir_switch is None:
                    # move new compiled F90 module files to module_build_dir
                    for f in glob('*.mod'):
                        if f in existing_modules:
                            continue
                        t = os.path.join(module_build_dir, f)
                        if os.path.abspath(f)==os.path.abspath(t):
                            continue
                        if os.path.isfile(t):
                            os.remove(t)
                        try:
                            self.move_file(f, module_build_dir)
                        except DistutilsFileError:
                            log.warn('failed to move %r to %r' \
                                         % (f, module_build_dir))

                if f_sources:
                    log.info("compiling Fortran sources")
                    f_objects += fcompiler.compile(f_sources,
                                                   output_dir=self.build_temp,
                                                   macros=macros,
                                                   include_dirs=include_dirs,
                                                   debug=self.debug,
                                                   extra_postargs=extra_postargs)
            else:
                f_objects = []

            objects.extend(f_objects)

            # assume that default linker is suitable for
            # linking Fortran object files
            ########################################

            if exe.link_with_fcompiler: # if using PROGRAM
                link_compiler = fcompiler
            else:
                link_compiler = compiler
            if cxx_sources:
                link_compiler = cxx_compiler

            ## May be dependent on other libs we're building
            shlib_libraries = []
            for libinfo in exe.libraries:
                if isinstance(libinfo, basestring):
                    shlib_libraries.append(convert_ulist([libinfo])[0])
                else:
                    shlib_libraries.append(libinfo[0])

            if not hasattr(link_compiler, 'linker_exe') or \
                    link_compiler.linker_exe is None:
                link_compiler.linker_exe = [link_compiler.linker_so[0]]

            linker_args = dict(
                target_desc          = link_compiler.EXECUTABLE,
                objects              = objects,
                output_filename      = exe.name,
                output_dir           = build_directory,
                libraries            = shlib_libraries,
                library_dirs         = library_dirs,
                debug                = self.debug,
                extra_preargs        = extra_preargs,
                extra_postargs       = extra_postargs,
                )
            if not exe.link_with_fcompiler:
                linker_args['runtime_library_dirs'] = runtime_library_dirs

            ## Alternate ending
            link_compiler.link(**linker_args)
Code Example #18
File: core.py Project: balarsen/pysysdevel
def setup(**attr):
    if len(sys.argv)<=1 and not attr.get('script_args',[]):
        try:
            from numpy.distutils.interactive import interactive_sys_argv
            from numpy.distutils.core import _exit_interactive_session
            import atexit
            atexit.register(_exit_interactive_session)
            sys.argv[:] = interactive_sys_argv(sys.argv)
            if len(sys.argv)>1:
                return setup(**attr)
        except:
            pass

    cmdclass = my_cmdclass.copy()

    new_attr = attr.copy()
    if 'cmdclass' in new_attr:
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass

    if 'configuration' in new_attr:
        # To avoid calling configuration if there are any errors
        # or help request in command in the line.
        configuration = new_attr.pop('configuration')

        old_dist = distutils.core._setup_distribution
        old_stop = distutils.core._setup_stop_after
        distutils.core._setup_distribution = None
        distutils.core._setup_stop_after = "commandline"
        try:
            dist = setup(**new_attr)
        finally:
            distutils.core._setup_distribution = old_dist
            distutils.core._setup_stop_after = old_stop
        if dist.help or not _command_line_ok():
            # probably displayed help, skip running any commands
            return dist

        # create setup dictionary and append to new_attr
        config = configuration()
        if hasattr(config,'todict'):
            config = config.todict()
        _dict_append(new_attr, **config)

    # Move extension source libraries to libraries
    libraries = []
    for ext in new_attr.get('ext_modules',[]):
        new_libraries = []
        for item in ext.libraries:
            #[item] = util.convert_ulist([item])
            if util.is_sequence(item):
                lib_name, build_info = item
                _check_append_ext_library(libraries, item)
                new_libraries.append(lib_name)
            elif util.is_string(item):
                new_libraries.append(item)
            else:
                raise TypeError("invalid description of extension module "
                                "library %r" % (item,))
        ext.libraries = new_libraries
    if libraries:
        if 'libraries' not in new_attr:
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)

    # sources in ext_modules or libraries may contain header files
    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
       and 'headers' not in new_attr:
        new_attr['headers'] = []

    # Use our custom Distribution class instead of distutils' one
    new_attr['distclass'] = CustomDistribution

    return old_setup(**new_attr)
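A hedged sketch of the setup-script shape this function rewrites: an Extension whose libraries list mixes plain names with (name, build_info) pairs has the pairs hoisted into the top-level 'libraries' option (the Extension class and file paths here are hypothetical):

setup(
    name='example',
    ext_modules=[Extension('example._core',
                           sources=['src/core_wrap.c'],
                           libraries=['m', ('support', {'sources': ['src/support.c']})])],
)
# After the loop above: ext.libraries == ['m', 'support'] and
# new_attr['libraries'] ends up containing ('support', {'sources': ['src/support.c']}).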