Example #1
    def learn(self, my_dict, aplot=None):
        if my_dict is None:
            logging.critical("Cannot learn function with empty dict")
            return lambda _: 1, 0
        d_dict = dict()
        samples, thresholds = [], []
        for k, v in six.iteritems(my_dict):
            for o in (_ for _ in v if _):
                dnearest = np.array(np.load("{}.npz".format(o))['X']).reshape(
                    -1, 1)
                var = np.var(dnearest)
                if var == 0:
                    continue
                med = np.median(dnearest)
                mean, _, _, h = mean_confidence_interval(dnearest)
                samples.append(dnearest.shape[0])
                d_dict.setdefault(o.split('/')[0], dict()).setdefault(k, [med, h])

                # for the threshold, fit a gaussian (unused for AP)
                thresholds.append(_gaussian_fit(dnearest))
        if len(d_dict) < 1:
            logging.critical("dictionary is empty")
            return lambda _: 1, 0
        for k, v in six.iteritems(d_dict):  # there is only one
            xdata = np.array(sorted(v))
            ydata = np.array([np.mean(v[x][0]) for x in xdata])
            yerr = np.array([np.mean(v[x][1]) for x in xdata])

        # Take only significant values, higher than 0
        mask = ydata > 0
        xdata = xdata[mask]
        if xdata.shape[0] < 2:
            logging.critical("Too few points to learn function")
            # no correction can be applied
            return lambda _: 1, 0

        ydata = ydata[mask]
        ydata = ydata[0] / ydata  # normalise
        yerr = yerr[mask]

        order = min(self.order, xdata.shape[0] - 1)
        warnings.filterwarnings("ignore")
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                poly = np.poly1d(np.polyfit(
                    xdata, ydata, order, w=1. / (yerr + 1e-15)))
            except np.RankWarning:
                logging.critical(
                    "Cannot fit polynomial with degree %d, npoints %d",
                    order, xdata.shape[0])
                return lambda _: 1, 0

        if self.aplot is not None:
            plot_learning_function(xdata, ydata, yerr, order, self.aplot, poly)

        # poly = partial(model, res.x)
        # take the first positive threshold when ordered by sample count
        # (descending), falling back to 0 if there is none
        return poly, 1 - (list(filter(
            lambda x: x > 0,
            np.array(thresholds)[np.array(samples).argsort()[::-1]])) or [0])[0]
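
# A small standalone sketch of the pattern used in learn() above: inside a
# catch_warnings() block, the "error" action turns a chosen warning category
# into an exception that can be handled with try/except, without leaking the
# filter change outside the block. (_demo_error_filter is a hypothetical
# helper for illustration only, not part of the example.)
import warnings

def _demo_error_filter():
    with warnings.catch_warnings():
        warnings.filterwarnings("error", category=UserWarning)
        try:
            warnings.warn("rank-deficient fit", UserWarning)
        except UserWarning as exc:
            return str(exc)
    return None

assert _demo_error_filter() == "rank-deficient fit"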
    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings(record=True) as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
    def test_filteredOnceWarning(self):
        """
        L{deprecate.warnAboutFunction} emits a warning that will be filtered
        once if L{warnings.filterwarning} is called with the module name of the
        deprecated function and an action of once.
        """
        # Clean up anything *else* that might spuriously filter out the warning,
        # such as the "always" simplefilter set up by unittest._collectWarnings.
        # We'll also rely on trial to restore the original filters afterwards.
        del warnings.filters[:]

        warnings.filterwarnings(
            action="module", module="twisted_private_helper")

        from twisted_private_helper import module
        module.callTestFunction()
        module.callTestFunction()

        warningsShown = self.flushWarnings()
        self.assertEqual(len(warningsShown), 1)
        message = warningsShown[0]['message']
        category = warningsShown[0]['category']
        filename = warningsShown[0]['filename']
        lineno = warningsShown[0]['lineno']
        msg = warnings.formatwarning(message, category, filename, lineno)
        self.assertTrue(
            msg.endswith("module.py:9: DeprecationWarning: A Warning String\n"
                         "  return a\n"),
            "Unexpected warning string: %r" % (msg,))
Example #4
def django_tests(verbosity, interactive, failfast, test_labels):
    from django.conf import settings
    state = setup(verbosity, test_labels)
    extra_tests = []

    # Run the test suite, including the extra validation tests.
    from django.test.utils import get_runner
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
    TestRunner = get_runner(settings)

    test_runner = TestRunner(
        verbosity=verbosity,
        interactive=interactive,
        failfast=failfast,
    )
    # Catch warnings thrown in test DB setup -- remove in Django 1.9
    with warnings.catch_warnings():
        warnings.filterwarnings(
            'ignore',
            "Custom SQL location '<app_label>/models/sql' is deprecated, "
            "use '<app_label>/sql' instead.",
            PendingDeprecationWarning
        )
        failures = test_runner.run_tests(
            test_labels or get_installed(), extra_tests=extra_tests)

    teardown(state)
    return failures
def log_likelihood_dataset(f, dataset, log_likelihood_datapoint, logger, ll_fun_wants_log_domain):
    """
    f : log-domain potentials
    ll_fun_wants_log_domain : whether or not the log-likelihood function needs f to be in log-domain (this is false only for the native chain LL implementation)
    """
    #print("f.dtype : %s" % f.dtype)
    #import hashlib
    #print("before " + str(int(hashlib.sha1(f.view(np.uint8)).hexdigest(), 16)))
    if not ll_fun_wants_log_domain:
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings('error')
                f = np.exp(f) # changing semantics of f instead of inserting if's on edge_pot=... and node_pot=...
        except RuntimeWarning as rtw:
            logger.debug("RuntimeWarning: " + str(rtw))
    #print("after " + str(int(hashlib.sha1(f.view(np.uint8)).hexdigest(), 16)))
    ll = 0
    edge_pot = f[dataset.binaries]
#    print(dataset.binaries)
#    print(log_edge_pot)
    #assert(log_edge_pot.shape == (dataset.n_labels, dataset.n_labels))
    for n in range(dataset.N):
        node_pot = f[dataset.unaries[n]]
        ll_datapoint = log_likelihood_datapoint(node_pot, edge_pot, dataset.Y[n], dataset.object_size[n], dataset.n_labels) 
        # if (ll_datapoint >0):
            # info_string = ""
            # info_string += 'log_likelihood_datapoint as computed: %g\n' % ll_datapoint
            # info_string += 'n: %g\n' % n
            # info_string += 'node_pot.tolist(): %s\n' % node_pot.tolist()
            # info_string += 'edge_pot.tolist(): %s\n' % edge_pot.tolist()
            # info_string += 'dataset.Y[n]: %s\n' % dataset.Y[n].tolist()
            # raise Exception("positive log-likelihood is not allowed. More information:\n" + info_string)
        ll += ll_datapoint
        # in grid case, object_size will be ignored
    return ll # LL should not be scaled !
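
# Standalone sketch of the overflow guard in log_likelihood_dataset() above:
# with the "error" action in place, numpy's "overflow encountered in exp"
# RuntimeWarning becomes a catchable exception instead of silently yielding inf.
import warnings
import numpy as np

try:
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        _ = np.exp(np.array([1000.0]))  # overflows float64
except RuntimeWarning as rtw:
    print("RuntimeWarning: " + str(rtw))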
Example #6
def test_global_vars():
    x, y, z, t = symbols("x y z t")
    result = codegen(('f', x*y), "F95", header=False, empty=False,
                     global_vars=(y,))
    source = result[0][1]
    expected = (
        "REAL*8 function f(x)\n"
        "implicit none\n"
        "REAL*8, intent(in) :: x\n"
        "f = x*y\n"
        "end function\n"
        )
    assert source == expected

    expected = (
        '#include "f.h"\n'
        '#include <math.h>\n'
        'double f(double x, double y) {\n'
        '   double f_result;\n'
        '   f_result = x*y + z;\n'
        '   return f_result;\n'
        '}\n'
    )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
        result = codegen(('f', x*y+z), "C", header=False, empty=False,
                         global_vars=(z, t))
        source = result[0][1]
        assert source == expected
Example #7
def test_ccode_results_named_ordered():
    x, y, z = symbols('x,y,z')
    B, C = symbols('B,C')
    A = MatrixSymbol('A', 1, 3)
    expr1 = Equality(A, Matrix([[1, 2, x]]))
    expr2 = Equality(C, (x + y)*z)
    expr3 = Equality(B, 2*x)
    name_expr = ("test", [expr1, expr2, expr3])
    expected = (
        '#include "test.h"\n'
        '#include <math.h>\n'
        'void test(double x, double *C, double z, double y, double *A, double *B) {\n'
        '   (*C) = z*(x + y);\n'
        '   A[0] = 1;\n'
        '   A[1] = 2;\n'
        '   A[2] = x;\n'
        '   (*B) = 2*x;\n'
        '}\n'
    )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)

        result = codegen(name_expr, "c", "test", header=False, empty=False,
                         argument_sequence=(x, C, z, y, A, B))
        source = result[0][1]
        assert source == expected
Example #8
def _init_g():
    """Call before using GdkPixbuf/GLib/Gio/GObject"""

    import gi

    gi.require_version("GLib", "2.0")
    gi.require_version("Gio", "2.0")
    gi.require_version("GObject", "2.0")
    gi.require_version("GdkPixbuf", "2.0")

    from gi.repository import GdkPixbuf

    # On Windows the default variants only handle ANSI paths, so replace them.
    # In some typelibs they are replaced by default, in others they aren't.
    if os.name == "nt":
        for name in ["new_from_file_at_scale", "new_from_file_at_size",
                     "new_from_file"]:
            cls = GdkPixbuf.Pixbuf
            setattr(
                cls, name, getattr(cls, name + "_utf8", getattr(cls, name)))

    # https://bugzilla.gnome.org/show_bug.cgi?id=670372
    if not hasattr(GdkPixbuf.Pixbuf, "savev"):
        GdkPixbuf.Pixbuf.savev = GdkPixbuf.Pixbuf.save

    # Newer glib is noisy regarding deprecated signals/properties
    # even with stable releases.
    if is_release():
        warnings.filterwarnings(
            'ignore', '.* It will be removed in a future version.',
            Warning)

    # blacklist some modules; simply loading them can cause segfaults
    sys.modules["glib"] = None
    sys.modules["gobject"] = None
Example #9
def main(argv):

    pd.set_option('display.width', 200)
    pd.set_option('display.height', 500)

    warnings.filterwarnings("ignore")

    global file_path, RMSLE_scorer

    # RMSLE_scorer
    RMSLE_scorer = metrics.make_scorer(RMSLE, greater_is_better = False)

    if(platform.system() == "Windows"):
        file_path = 'C:/Python/Others/data/Kaggle/Caterpillar_Tube_Pricing/'
    else:
        file_path = '/home/roshan/Desktop/DS/Others/data/Kaggle/Caterpillar_Tube_Pricing/'

########################################################################################################################
# Read the input file, munge and split the data into train and test
########################################################################################################################
    Train_DS      = pd.read_csv(file_path+'competition_data/train_set.csv',sep=',')
    Actual_DS     = pd.read_csv(file_path+'competition_data/test_set.csv',sep=',')
    Tube_DS       = pd.read_csv(file_path+'competition_data/tube.csv',sep=',')
    Bill_DS       = pd.read_csv(file_path+'competition_data/bill_of_materials.csv',sep=',')
    Spec_DS       = pd.read_csv(file_path+'competition_data/specs.csv',sep=',')
    Tube_End_DS   = pd.read_csv(file_path+'competition_data/tube_end_form.csv',sep=',')
    Comp_DS       = pd.read_csv(file_path+'competition_data/components_2.csv',sep=',')
    Sample_DS     = pd.read_csv(file_path+'sample_submission.csv',sep=',')


    Train_DS, Actual_DS, y =  Data_Munging(Train_DS,Actual_DS,Tube_DS,Bill_DS,Spec_DS,Tube_End_DS, Comp_DS)

    pred_Actual = RFR_Regressor(Train_DS, y, Actual_DS, Sample_DS, grid=False)
Example #10
def test_Bug_2543():
    # Test that it is possible to add all values to itself / deepcopy
    # This was not possible because validate_bool_maybe_none did not
    # accept None as an argument.
    # https://github.com/matplotlib/matplotlib/issues/2543
    # We filter warnings at this stage since a number of them are raised
    # for deprecated rcparams, as they should be. We don't want these
    # printed in the test suite.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                category=MatplotlibDeprecationWarning)
        with mpl.rc_context():
            _copy = mpl.rcParams.copy()
            for key in _copy:
                mpl.rcParams[key] = _copy[key]
        with mpl.rc_context():
            _deep_copy = copy.deepcopy(mpl.rcParams)
        # real test is that this does not raise
        assert validate_bool_maybe_none(None) is None
        assert validate_bool_maybe_none("none") is None

    with pytest.raises(ValueError):
        validate_bool_maybe_none("blah")
    with pytest.raises(ValueError):
        validate_bool(None)
    with pytest.raises(ValueError):
        with mpl.rc_context():
            mpl.rcParams['svg.fonttype'] = True
Example #11
def _suppress_scipy_warnings():
    # Infiltrate warnings if necessary
    numpy_ver = versions['numpy']
    scipy_ver = versions['scipy']
    # There are way too many deprecation warnings spat out at the user.
    # Let's assume that they should be fixed by the time scipy 0.7.0 is out
    if not __debug__ or (__debug__ and 'PY' not in debug.active):
        filter_lines = []
        if "0.6.0" <= scipy_ver and scipy_ver < "0.7.0" \
            and numpy_ver > "1.1.0":
            if __debug__:
                debug('EXT', "Setting up filters for numpy DeprecationWarnings "
                      "regarding scipy < 0.7.0")
            filter_lines += [
                ('NumpyTest will be removed in the next release.*',
                 DeprecationWarning),
                ('PyArray_FromDims: use PyArray_SimpleNew.',
                 DeprecationWarning),
                ('PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.',
                 DeprecationWarning),
                # Trick re.match, since warnings compiles the pattern without re.DOTALL
                ('[\na-z \t0-9]*The original semantics of histogram is scheduled to be.*'
                 '[\na-z \t0-9]*', Warning) ]
        if scipy_ver >= "0.15":
            filter_lines += [("`scipy.weave` is deprecated, use `weave` instead!",
                              DeprecationWarning)]
        if scipy_ver >= "0.16":
            # scipy deprecated it but statsmodels still imports it for now
            filter_lines += [("`scipy.linalg.calc_lwork` is deprecated!",
                              DeprecationWarning)]
        for f, w in filter_lines:
            warnings.filterwarnings('ignore', f, w)
    def load_mysql(self, model, csv_path):
        import warnings
        import MySQLdb
        warnings.filterwarnings("ignore", category=MySQLdb.Warning)

        # Flush the target model
        self.cursor.execute('TRUNCATE TABLE %s' % model._meta.db_table)

        # Build the MySQL LOAD DATA INFILE command
        bulk_sql_load_part_1 = """
            LOAD DATA LOCAL INFILE '%s'
            INTO TABLE %s
            FIELDS TERMINATED BY ','
            OPTIONALLY ENCLOSED BY '"'
            LINES TERMINATED BY '\\n'
            IGNORE 1 LINES
            (
        """ % (
            csv_path,
            model._meta.db_table
        )

        # Get the headers and the row count from the source CSV
        csv_headers = self.get_headers(csv_path)
        csv_record_cnt = self.get_row_count(csv_path)

        header_sql_list = []
        field_types = dict(
            (f.db_column, f.db_type(self.connection))
            for f in model._meta.fields
        )
        date_set_list = []

        for h in csv_headers:
            # Pull the data type of the field
            data_type = field_types[h]
            # If it is a date field, we need to reformat the data
            # so that MySQL will properly parse it on the way in.
            if data_type == 'date':
                header_sql_list.append('@`%s`' % h)
                date_set_list.append(
                    "`%s` =  %s" % (h, self.date_sql % h)
                )
            elif data_type == 'datetime':
                header_sql_list.append('@`%s`' % h)
                date_set_list.append(
                    "`%s` =  %s" % (h, self.datetime_sql % h)
                )
            else:
                header_sql_list.append('`%s`' % h)

        bulk_sql_load = bulk_sql_load_part_1 + ','.join(header_sql_list) + ')'
        if date_set_list:
            bulk_sql_load += " set %s" % ",".join(date_set_list)

        # Run the query
        cnt = self.cursor.execute(bulk_sql_load)

        # Report back on how we did
        self.finish_load_message(cnt, csv_record_cnt)
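
# Standalone sketch of category-based suppression like the MySQLdb.Warning
# filter above, using a stand-in Warning subclass so it runs without MySQLdb
# (FakeDriverWarning is hypothetical, for illustration only):
import warnings

class FakeDriverWarning(Warning):
    """Stand-in for a library-specific warning class such as MySQLdb.Warning."""

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.filterwarnings("ignore", category=FakeDriverWarning)
    warnings.warn("duplicate entry truncated", FakeDriverWarning)  # suppressed
    warnings.warn("unrelated", UserWarning)                        # still recorded
assert len(caught) == 1 and caught[0].category is UserWarning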
Example #13
def test_rcparams_init():
    with pytest.raises(ValueError):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    message='.*(validate)',
                                    category=UserWarning)
            mpl.RcParams({'figure.figsize': (3.5, 42, 1)})
Example #14
    def setUp(self):
        self.warn_filters = warnings.filters
        warnings.filterwarnings('ignore',
                                category=FutureWarning,
                                module=".*format")

        self.frame = _frame.copy()
	def __init__(self,path_to_corpora):
		## Built-in dictionary for word-parser, and path to corpora
		self.stopword = stopwords.words('english')
		self.path_to_corpora = path_to_corpora
		warnings.filterwarnings("ignore")
		print 'Initialize LDAModel....path to corpora : ',path_to_corpora

		## Hyperparameters for training model
		# Minimum length of single document
		self.min_length = 200
		# Num_topics in LDA
		self.num_topics = 90
		# Filter out tokens that appear in less than `no_below` documents (absolute number)
		self.no_below_this_number = 50
		# Filter out tokens that appear in more than `no_above` documents (fraction of total corpus size, *not* absolute number).
		self.no_above_fraction_of_doc = 0.2
		# Remove topic which weights less than this number
		self.remove_topic_so_less = 0.05
		# Number of iterations in training LDA model, the less the documents in total, the more the iterations for LDA model to converge
		self.num_of_iterations = 1000
		# Number of passes in the model
		self.passes = 3
		#Print all hyperparameters
		parameters = {}
		parameters['min_length'] = self.min_length
		parameters['num_topics'] = self.num_topics
		parameters['no_below_this_number'] = self.no_below_this_number
		parameters['no_above_fraction_of_doc'] = self.no_above_fraction_of_doc
		parameters['remove_topic_so_less'] = self.remove_topic_so_less
		parameters['num_of_iterations'] = self.num_of_iterations
		parameters['passes'] = self.passes
		for k in parameters:
		    print "Parameter for {0} is {1}".format(k,parameters[k])
		print 'Finished initializing....'
Example #16
    def test_N_put_without_confirm(self, sftp):
        """
        verify that get/put work without confirmation.
        """
        warnings.filterwarnings('ignore', 'tempnam.*')

        fd, localname = mkstemp()
        os.close(fd)
        text = b'All I wanted was a plastic bunny rabbit.\n'
        with open(localname, 'wb') as f:
            f.write(text)
        saved_progress = []

        def progress_callback(x, y):
            saved_progress.append((x, y))
        res = sftp.put(localname, sftp.FOLDER + '/bunny.txt', progress_callback, False)

        assert SFTPAttributes().attr == res.attr

        with sftp.open(sftp.FOLDER + '/bunny.txt', 'r') as f:
            assert text == f.read(128)
        assert (41, 41) == saved_progress[-1]

        os.unlink(localname)
        sftp.unlink(sftp.FOLDER + '/bunny.txt')
Example #17
def capture_glib_warnings(allow_warnings=False, allow_criticals=False):
    """Temporarily suppress glib warning output and record them.

    The test suite is run with G_DEBUG="fatal-warnings fatal-criticals"
    by default. Setting allow_warnings and allow_criticals will temporarily
    allow warnings or criticals without terminating the test run.
    """

    old_mask = GLib.log_set_always_fatal(GLib.LogLevelFlags(0))

    new_mask = old_mask
    if allow_warnings:
        new_mask &= ~GLib.LogLevelFlags.LEVEL_WARNING
    if allow_criticals:
        new_mask &= ~GLib.LogLevelFlags.LEVEL_CRITICAL

    GLib.log_set_always_fatal(GLib.LogLevelFlags(new_mask))

    GLibWarning = gi._gi._gobject.Warning
    try:
        with warnings.catch_warnings(record=True) as warn:
            warnings.filterwarnings('always', category=GLibWarning)
            yield warn
    finally:
        GLib.log_set_always_fatal(old_mask)
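
# Standalone sketch of the same record-and-filter pattern without the GLib
# dependency, assuming the goal is a reusable helper that records every
# warning of one category raised inside the block (_capture_warnings is a
# hypothetical name used only for this illustration):
import contextlib
import warnings

@contextlib.contextmanager
def _capture_warnings(category=Warning):
    with warnings.catch_warnings(record=True) as caught:
        warnings.filterwarnings('always', category=category)
        yield caught

with _capture_warnings(UserWarning) as caught:
    warnings.warn("recorded", UserWarning)
assert len(caught) == 1 and caught[0].category is UserWarning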
Example #18
def test_cs_graph_components():
    D = np.eye(4, dtype=np.bool)

    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        warnings.filterwarnings("ignore",
                    message="`cs_graph_components` is deprecated")

        n_comp, flag = csgraph.cs_graph_components(csr_matrix(D))
        assert_(n_comp == 4)
        assert_equal(flag, [0, 1, 2, 3])

        D[0, 1] = D[1, 0] = 1

        n_comp, flag = csgraph.cs_graph_components(csr_matrix(D))
        assert_(n_comp == 3)
        assert_equal(flag, [0, 0, 1, 2])

        # A pathological case...
        D[2, 2] = 0
        n_comp, flag = csgraph.cs_graph_components(csr_matrix(D))
        assert_(n_comp == 2)
        assert_equal(flag, [0, 0, -2, 1])
    finally:
        warn_ctx.__exit__()
Example #19
    def test_H_get_put(self, sftp):
        """
        verify that get/put work.
        """
        warnings.filterwarnings('ignore', 'tempnam.*')

        fd, localname = mkstemp()
        os.close(fd)
        text = b'All I wanted was a plastic bunny rabbit.\n'
        with open(localname, 'wb') as f:
            f.write(text)
        saved_progress = []

        def progress_callback(x, y):
            saved_progress.append((x, y))
        sftp.put(localname, sftp.FOLDER + '/bunny.txt', progress_callback)

        with sftp.open(sftp.FOLDER + '/bunny.txt', 'rb') as f:
            assert text == f.read(128)
        assert [(41, 41)] == saved_progress

        os.unlink(localname)
        fd, localname = mkstemp()
        os.close(fd)
        saved_progress = []
        sftp.get(sftp.FOLDER + '/bunny.txt', localname, progress_callback)

        with open(localname, 'rb') as f:
            assert text == f.read(128)
        assert [(41, 41)] == saved_progress

        os.unlink(localname)
        sftp.unlink(sftp.FOLDER + '/bunny.txt')
Example #20
def init(format_prefix="", output_mode=OutputMode.LOGFILE_WITH_CONSOLE_ERRORS, logfile_path=DEFAULT_LOGFILE_PATH):
    if logfile_path == "/dev/null":
        assert output_mode != OutputMode.LOGFILE, "Must enable a logging mode."
        output_mode = OutputMode.CONSOLE

    # Start with the default
    logging.config.dictConfig( get_default_config( format_prefix, output_mode, logfile_path ) )
    
    # Update from the user's customizations
    loggingHelpers.updateFromConfigFile()
    
    # Capture warnings from the warnings module
    logging.captureWarnings(True)
    
    # Warnings module warnings are shown only once
    warnings.filterwarnings("once")

    # Don't warn about pending deprecations (PyQt generates some of these)
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    
    # Custom format for warnings
    def simple_warning_format(message, category, filename, lineno, line=None):
        filename = os.path.split(filename)[1]
        return filename + "(" + str(lineno) + "): " + category.__name__ + ": " + message[0]

    warnings.formatwarning = simple_warning_format
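
# Standalone sketch of the warnings-to-logging bridge configured above: once
# logging.captureWarnings(True) is active, warnings raised via the warnings
# module are emitted through the 'py.warnings' logger rather than printed
# directly by the warnings machinery.
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logging.captureWarnings(True)
warnings.simplefilter("once")
warnings.warn("routed through the 'py.warnings' logger", UserWarning)
logging.captureWarnings(False)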
Example #21
def test_version_2_0_memmap():
    # requires more than 2 bytes for header
    dt = [(("%d" % i) * 100, float) for i in range(500)]
    d = np.ones(1000, dtype=dt)
    tf = tempfile.mktemp('', 'mmap', dir=tempdir)

    # 1.0 requested but data cannot be saved this way
    assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype,
                            shape=d.shape, version=(1, 0))

    ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                            shape=d.shape, version=(2, 0))
    ma[...] = d
    del ma

    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', UserWarning)
        ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                                shape=d.shape, version=None)
        assert_(w[0].category is UserWarning)
        ma[...] = d
        del ma

    ma = format.open_memmap(tf, mode='r')
    assert_array_equal(ma, d)
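
# Standalone sketch of the capture pattern used above: the 'always' action
# disables the usual once-per-location suppression, so every matching warning
# is recorded and its category can be inspected afterwards.
import warnings

with warnings.catch_warnings(record=True) as w:
    warnings.filterwarnings('always', '', UserWarning)
    warnings.warn("first", UserWarning)
    warnings.warn("first", UserWarning)  # the duplicate is recorded as well
assert len(w) == 2 and w[0].category is UserWarning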
Example #22
def main():

    warnings.filterwarnings("ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning)

    options = getoptions()

    setuplogger(options['log'], options['logfile'], logging.INFO)

    total_procs = options['nprocs'] * options['total_instances']
    start_offset = options['instance_id'] * options['nprocs']

    exit_code = 0

    if options['nprocs'] == 1:
        createsummary(options, None, None)
    else:
        proclist = []
        for procid in xrange(options['nprocs']):
            p = Process( target=createsummary, args=(options, total_procs, start_offset + procid) )
            p.start()
            proclist.append(p)

        for proc in proclist:
            proc.join()
            exit_code += proc.exitcode

    sys.exit(exit_code)
Example #23
def get_build_from_file (platform, file_name, name):
    gub_name = file_name.replace (os.getcwd () + '/', '')
    logging.verbose ('reading spec: %(gub_name)s\n' % locals ())
    # Ugh, FIXME
    # This loads gub/specs/darwin/python.py in PYTHON. namespace,
    # overwriting the PYTHON. namespace from gub/specs/python.py
    # Current workaround: always/also use __darwin etc. postfixing
    # of class names, also in specs/darwin/ etc.
    warnings.filterwarnings ('ignore', '''Parent module 'python-2' ''')
    module = misc.load_module (file_name, name)
    # cross/gcc.py:Gcc will be called: cross/Gcc.py,
    # to distinguish from specs/gcc.py:Gcc.py
    base = os.path.basename (name)
    class_name = ((base[0].upper () + base[1:])
                  .replace ('-', '_')
                  .replace ('.', '_')
                  .replace ('++', '_xx_')
                  .replace ('+', '_x_')
                  + ('-' + platform).replace ('-', '__'))
    logging.debug ('LOOKING FOR: %(class_name)s\n' % locals ())
    cls = misc.most_significant_in_dict (module.__dict__, class_name, '__')
    if (platform == 'tools32'
        and (not cls or issubclass (cls, target.AutoBuild))):
        cls = misc.most_significant_in_dict (module.__dict__, class_name.replace ('tools32', 'tools'), '__')
    if ((platform == 'tools' or platform == 'tools32')
        and (issubclass (cls, target.AutoBuild)
             and not issubclass (cls, tools.AutoBuild)
             and not issubclass (cls, tools32.AutoBuild))):
        cls = None
    return cls
Example #24
    def test_tmpnam(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
                                    r"test_os$")
            warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning)

            name = os.tmpnam()
            if sys.platform in ("win32",):
                # The Windows tmpnam() seems useless.  From the MS docs:
                #
                #     The character string that tmpnam creates consists of
                #     the path prefix, defined by the entry P_tmpdir in the
                #     file STDIO.H, followed by a sequence consisting of the
                #     digit characters '0' through '9'; the numerical value
                #     of this string is in the range 1 - 65,535.  Changing the
                #     definitions of L_tmpnam or P_tmpdir in STDIO.H does not
                #     change the operation of tmpnam.
                #
                # The really bizarre part is that, at least under MSVC6,
                # P_tmpdir is "\\".  That is, the path returned refers to
                # the root of the current drive.  That's a terrible place to
                # put temp files, and, depending on privileges, the user
                # may not even be able to open a file in the root directory.
                self.assertFalse(os.path.exists(name),
                            "file already exists for temporary file")
            else:
                self.check_tempfile(name)
    def test_norm_hash_name(self):
        "test norm_hash_name()"
        from itertools import chain
        from passlib.utils.pbkdf2 import norm_hash_name, _nhn_hash_names

        # test formats
        for format in self.ndn_formats:
            norm_hash_name("md4", format)
        self.assertRaises(ValueError, norm_hash_name, "md4", None)
        self.assertRaises(ValueError, norm_hash_name, "md4", "fake")

        # test types
        self.assertEqual(norm_hash_name(u("MD4")), "md4")
        self.assertEqual(norm_hash_name(b("MD4")), "md4")
        self.assertRaises(TypeError, norm_hash_name, None)

        # test selected results
        with catch_warnings():
            warnings.filterwarnings("ignore", ".*unknown hash")
            for row in chain(_nhn_hash_names, self.ndn_values):
                for idx, format in enumerate(self.ndn_formats):
                    correct = row[idx]
                    for value in row:
                        result = norm_hash_name(value, format)
                        self.assertEqual(result, correct, "name=%r, format=%r:" % (value, format))
Example #26
    def test_N_put_without_confirm(self):
        """
        verify that get/put work without confirmation.
        """
        warnings.filterwarnings('ignore', 'tempnam.*')

        """
        localname = os.tempnam()
        text = 'All I wanted was a plastic bunny rabbit.\n'
        f = open(localname, 'wb')
        f.write(text)
        f.close()
        """

        text = b'All I wanted was a plastic bunny rabbit.\n'

        f = tempfile.NamedTemporaryFile(delete=False)
        localname = f.name
        f.write(text)
        f.close()

        saved_progress = []
        def progress_callback(x, y):
            saved_progress.append((x, y))
        res = sftp.put(localname, FOLDER + '/bunny.txt', progress_callback, False)

        self.assertEquals(SFTPAttributes().attr, res.attr)

        f = sftp.open(FOLDER + '/bunny.txt', 'r')
        self.assertEquals(text, f.read(128))
        f.close()
        self.assertEquals((41, 41), saved_progress[-1])

        os.unlink(localname)
        sftp.unlink(FOLDER + '/bunny.txt')
Example #27
    def test_warnings_on_cleanup(self) -> None:
        # Two kinds of warning on shutdown
        #   Issue 10888: may write to stderr if modules are nulled out
        #   ResourceWarning will be triggered by __del__
        with self.do_create() as dir:
            if os.sep != '\\':
                # Embed a backslash in order to make sure string escaping
                # in the displayed error message is dealt with correctly
                suffix = '\\check_backslash_handling'
            else:
                suffix = ''
            d = self.do_create(dir=dir, suf=suffix)

            #Check for the Issue 10888 message
            modules = [os, os.path]
            if has_stat:
                modules.append(stat)
            with support.captured_stderr() as err:
                with NulledModules(*modules):
                    d.cleanup()
            message = err.getvalue().replace('\\\\', '\\')
            self.assertIn("while cleaning up",  message)
            self.assertIn(d.name,  message)

            # Check for the resource warning
            with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
                warnings.filterwarnings("always", category=ResourceWarning)
                d.__del__()
            self.assertFalse(os.path.exists(d.name),
                        "TemporaryDirectory %s exists after __del__" % d.name)
Example #28
    def test_5_save_host_keys(self):
        """
        verify that SSHClient correctly saves a known_hosts file.
        """
        warnings.filterwarnings('ignore', 'tempnam.*')

        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
        public_host_key = paramiko.RSAKey(data=host_key.asbytes())
        fd, localname = mkstemp()
        os.close(fd)

        client = paramiko.SSHClient()
        self.assertEquals(0, len(client.get_host_keys()))

        host_id = '[%s]:%d' % (self.addr, self.port)

        client.get_host_keys().add(host_id, 'ssh-rsa', public_host_key)
        self.assertEquals(1, len(client.get_host_keys()))
        self.assertEquals(public_host_key, client.get_host_keys()[host_id]['ssh-rsa'])

        client.save_host_keys(localname)

        with open(localname) as fd:
            assert host_id in fd.read()

        os.unlink(localname)
Example #29
    def request(self, method, amp, path='/', **kwargs):
        LOG.debug("request url %s", path)
        _request = getattr(self.session, method.lower())
        _url = self._base_url(amp.lb_network_ip) + path
        LOG.debug("request url " + _url)
        timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
                         CONF.haproxy_amphora.rest_request_read_timeout)
        reqargs = {
            'verify': CONF.haproxy_amphora.server_ca,
            'url': _url,
            'timeout': timeout_tuple, }
        reqargs.update(kwargs)
        headers = reqargs.setdefault('headers', {})

        headers['User-Agent'] = OCTAVIA_API_CLIENT
        self.ssl_adapter.uuid = amp.id
        # Keep retrying
        for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
            try:
                with warnings.catch_warnings():
                    warnings.filterwarnings(
                        "ignore",
                        message="A true SSLContext object is not available"
                    )
                    r = _request(**reqargs)
            except (requests.ConnectionError, requests.Timeout):
                LOG.warning(_LW("Could not connect to instance. Retrying."))
                time.sleep(CONF.haproxy_amphora.connection_retry_interval)
                if a == CONF.haproxy_amphora.connection_max_retries - 1:
                    raise driver_except.TimeOutException()
            else:
                return r
        raise driver_except.UnavailableException()
Example #30
def main(argv=None, interactive=True):
    """
    Entry point for setup.py.

    Wrapper for a profiler if requested otherwise just call run() directly.
    If profiling is enabled we disable interactivity as it would wait for user
    input and influence the statistics. However the -r option still works.
    """
    # catch and ignore a NumPy deprecation warning
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            "ignore", 'The compiler package is deprecated and removed in '
            'Python 3.x.', DeprecationWarning)
        np.safe_eval('1')

    if '-p' in sys.argv or '--profile' in sys.argv:
        try:
            import cProfile as Profile
        except ImportError:
            import Profile
        Profile.run('from obspy.scripts.runtests import run; run()',
                    'obspy.pstats')
        import pstats
        stats = pstats.Stats('obspy.pstats')
        print()
        print("Profiling:")
        stats.sort_stats('cumulative').print_stats('obspy.', 20)
        print(PSTATS_HELP)
    else:
        errors = run(argv, interactive)
        if errors:
            sys.exit(1)
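
# Standalone sketch of the scoping behaviour relied on above: filters added
# inside a catch_warnings() block are discarded when the block exits, so the
# temporary ignore rule does not leak into the rest of the program.
import warnings

n_filters_before = len(warnings.filters)
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
assert len(warnings.filters) == n_filters_before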
Example #31
def mulens(
        fid, magpsf, sigmapsf, magnr, sigmagnr,
        magzpsci, isdiffpos, rf, pca):
    """ Returns the predicted class (among microlensing, variable star,
    cataclysmic event, and constant event) & probability of an alert to be
    a microlensing event in each band using a Random Forest Classifier.

    Parameters
    ----------
    fid: Spark DataFrame Column
        Filter IDs (int)
    magpsf, sigmapsf: Spark DataFrame Columns
        Magnitude from PSF-fit photometry, and 1-sigma error
    magnr, sigmagnr: Spark DataFrame Columns
        Magnitude of nearest source in reference image PSF-catalog
        within 30 arcsec and 1-sigma error
    magzpsci: Spark DataFrame Column
        Magnitude zero point for photometry estimates
    isdiffpos: Spark DataFrame Column
        t => candidate is from positive (sci minus ref) subtraction
        f => candidate is from negative (ref minus sci) subtraction
    rf: RandomForestClassifier
        sklearn.ensemble._forest.RandomForestClassifier
    pca: PCA
        sklearn.decomposition._pca.PCA

    Returns
    ----------
    out: list
        Returns the class (string) and microlensing score (double) ordered as
        [class_band_1, ml_score_band1, class_band_2, ml_score_band2]

    Examples
    ---------
    >>> from fink_science.microlensing.classifier import load_external_model
    >>> from fink_science.microlensing.classifier import load_mulens_schema_twobands
    >>> from fink_science.utilities import concat_col
    >>> from pyspark.sql import functions as F

    # wrapper to pass broadcasted values
    >>> def mulens_wrapper(fid, magpsf, sigmapsf, magnr, sigmagnr, magzpsci, isdiffpos):
    ...     return mulens(fid, magpsf, sigmapsf, magnr, sigmagnr, magzpsci, isdiffpos, rfbcast.value, pcabcast.value)

    >>> df = spark.read.load(ztf_alert_sample)

    >>> schema = load_mulens_schema_twobands()

    # Required alert columns
    >>> what = [
    ...    'fid', 'magpsf', 'sigmapsf',
    ...    'magnr', 'sigmagnr', 'magzpsci', 'isdiffpos']

    # Use for creating temp name
    >>> prefix = 'c'
    >>> what_prefix = [prefix + i for i in what]

    # Append temp columns with historical + current measurements
    >>> for colname in what:
    ...    df = concat_col(df, colname, prefix=prefix)

    >>> rf, pca = load_external_model(model_path)
    >>> rfbcast = spark.sparkContext.broadcast(rf)
    >>> pcabcast = spark.sparkContext.broadcast(pca)

    >>> t = F.udf(mulens_wrapper, schema)
    >>> args = [F.col(i) for i in what_prefix]
    >>> df_mulens = df.withColumn('mulens', t(*args))

    # Drop temp columns
    >>> df_mulens = df_mulens.drop(*what_prefix)

    >>> df_mulens.agg({"mulens.ml_score_1": "min"}).collect()[0][0]
    0.0

    >>> df_mulens.agg({"mulens.ml_score_1": "max"}).collect()[0][0] < 1.0
    True
    """
    warnings.filterwarnings('ignore')

    # Select only valid measurements (not upper limits)
    badval = None
    maskNotNone = np.array(magpsf) != badval

    out = []
    for filt in [1, 2]:
        maskFilter = np.array(fid) == filt
        m = maskNotNone * maskFilter

        # Reject if less than 10 measurements
        if np.sum(m) < 10:
            out.extend(['', 0.0])
            continue

        # Compute DC mag
        mag, err = np.array([
            dc_mag(i[0], i[1], i[2], i[3], i[4], i[5], i[6])
            for i in zip(
                np.array(fid)[m],
                np.array(magpsf)[m],
                np.array(sigmapsf)[m],
                np.array(magnr)[m],
                np.array(sigmagnr)[m],
                np.array(magzpsci)[m],
                np.array(isdiffpos)[m])
        ]).T

        # Run the classifier
        output = microlensing_classifier.predict(mag, err, rf, pca)

        # Update the results
        # Beware, in the branch FINK the order has changed
        # classification,p_cons,p_CV,p_ML,p_var = microlensing_classifier.predict()
        out.extend([str(output[0]), float(output[3][0])])

    return out
Example #32
import dash_html_components as html
import dash_table_experiments as dt

import base64
import json
import plotly
import io
import dash

from datetime import datetime

import flask

import warnings

warnings.filterwarnings('ignore')

# ==================================

warnings.filterwarnings("ignore")

layout_doctor = html.Div(children=[
    html.Div(className="container",
             style={"display": "grid"},
             children=[
                 html.Button(children=[html.A('prediction', href='/success')],
                             n_clicks=0,
                             type='button',
                             style={"margin-top": "10px"}),
                 html.Button(children=[html.A('search', href='/search')],
                             n_clicks=0,
Example #33
Original file is located at
    https://colab.research.google.com/drive/1f3ewu9kbT-VZ5zgy8hjMFmI-0RYTAHho
"""
"""**Train Test Split**"""

import warnings

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

warnings.filterwarnings("ignore", category=DeprecationWarning)

iris = load_iris()

X = iris.data
y = iris.target

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=3)

# logistic_reg
lgr = LogisticRegression(solver='liblinear', multi_class='auto')
lgr.fit(X_train, y_train)
y_pred = lgr.predict(X_test)
Example #34
    import distutils
    import glob
    import inspect
    import json
    import logging
    import os
    import re
    import shutil
    import sys
    import tempfile
    import threading
    import time
    import traceback
    import warnings

    warnings.filterwarnings(action="ignore",
                            message="Python 2 is no longer supported")
    warnings.filterwarnings(action="ignore",
                            message=".*was already imported",
                            category=UserWarning)
    warnings.filterwarnings(action="ignore",
                            message=".*using a very old release",
                            category=UserWarning)
    warnings.filterwarnings(action="ignore",
                            message=".*default buffer size will be used",
                            category=RuntimeWarning)
    warnings.filterwarnings(action="ignore",
                            category=UserWarning,
                            module="psycopg2")

    if "--deprecations" not in sys.argv:
        warnings.filterwarnings(action="ignore", category=DeprecationWarning)
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################

import matplotlib.pyplot as pl
import numpy as np
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import ShuffleSplit, train_test_split

def ModelLearning(X, y):
    """ Calculates the performance of several models with varying sizes of training data.
        The learning and testing scores for each model are then plotted. """

    # Create 10 cross-validation sets for training and testing
    cv = ShuffleSplit(n_splits = 10, test_size = 0.2, random_state = 0)

    # Generate the training set sizes increasing by 50
    train_sizes = np.rint(np.linspace(1, X.shape[0]*0.8 - 1, 9)).astype(int)

    # Create the figure window
    fig = pl.figure(figsize=(10,7))
Example #36
"""
Bioconda Utils Command Line Interface

"""

# Workaround for spurious numpy warning message
# ".../importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size \
# changed, may indicate binary incompatibility. Expected 96, got 88"
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")

import sys
import os
import shlex
import logging
from collections import defaultdict, Counter
from functools import partial
import inspect
from typing import List, Tuple

import argh
from argh import arg, named
import networkx as nx
from networkx.drawing.nx_pydot import write_dot
import pandas

from . import __version__ as VERSION
from . import utils
from .build import build_recipes
from . import docker_utils
from . import lint
Example #37
#!/usr/bin/env python
# coding: utf-8
# uadb release 26-09-2019
import json
import sys
import stat
import os
from datetime import date
import re
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
import warnings
warnings.filterwarnings("error")
# from pdb import set_trace
# import pprint
# pp = pprint.pprint

__name__ = "uadb"
__date__ = "12-12-2019"
__version__ = "0.1.8"
__author__ = "Giuseppe Materni"


class Log(object):
    def __init__(self, path_log, out=1):
        self.out = out
        self.path_log = path_log
        self.log_start = 0
Example #38
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K

import tensorflow as tf

# Set some parameters
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 1
TRAIN_PATH = './data/stage1_train/'
TEST_PATH = './data/stage1_test/'
patch_size = 128

warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
seed = 42
random.seed = seed
np.random.seed = seed

# In[142]:

# Get custom functions
from Cell_Helpers import *

# In[4]:

# Get train and test IDs
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]
Example #39
import warnings

from scs_analysis.chart.single_chart import SingleChart
from scs_analysis.cmd.cmd_single_chart import CmdSingleChart

from scs_core.data.json import JSONify
from scs_core.data.path_dict import PathDict

from scs_core.sync.line_reader import LineReader


# --------------------------------------------------------------------------------------------------------------------

if __name__ == '__main__':

    warnings.filterwarnings("ignore", module="matplotlib")

    # ----------------------------------------------------------------------------------------------------------------
    # cmd...

    cmd = CmdSingleChart()

    if cmd.verbose:
        print("single_chart: %s" % cmd, file=sys.stderr)

    chart = None
    proc = None

    try:
        # ------------------------------------------------------------------------------------------------------------
        # resources...
Example #40
def extract_features_mulens(
        fid, magpsf, sigmapsf, magnr, sigmagnr,
        magzpsci, isdiffpos):
    """ Returns the predicted class (among microlensing, variable star,
    cataclysmic event, and constant event) & probability of an alert to be
    a microlensing event in each band using a Random Forest Classifier.

    Parameters
    ----------
    fid: Spark DataFrame Column
        Filter IDs (int)
    magpsf, sigmapsf: Spark DataFrame Columns
        Magnitude from PSF-fit photometry, and 1-sigma error
    magnr, sigmagnr: Spark DataFrame Columns
        Magnitude of nearest source in reference image PSF-catalog
        within 30 arcsec and 1-sigma error
    magzpsci: Spark DataFrame Column
        Magnitude zero point for photometry estimates
    isdiffpos: Spark DataFrame Column
        t => candidate is from positive (sci minus ref) subtraction
        f => candidate is from negative (ref minus sci) subtraction

    Returns
    ----------
    out: list of string
        Return the features (2 * 47)

    Examples
    ----------
    >>> from pyspark.sql.functions import split
    >>> from pyspark.sql.types import FloatType
    >>> from fink_science.utilities import concat_col
    >>> from pyspark.sql import functions as F

    >>> df = spark.read.load(ztf_alert_sample)

    # Required alert columns
    >>> what = ['fid', 'magpsf', 'sigmapsf', 'magnr', 'sigmagnr', 'magzpsci', 'isdiffpos']

    # Use for creating temp name
    >>> prefix = 'c'
    >>> what_prefix = [prefix + i for i in what]

    # Append temp columns with historical + current measurements
    >>> for colname in what:
    ...    df = concat_col(df, colname, prefix=prefix)

    # Perform the fit + classification (default model)
    >>> args = [F.col(i) for i in what_prefix]
    >>> df = df.withColumn('features', extract_features_mulens(*args))

    >>> for name in LIA_FEATURE_NAMES:
    ...   index = LIA_FEATURE_NAMES.index(name)
    ...   df = df.withColumn(name, split(df['features'], ',')[index].astype(FloatType()))

    # Trigger something
    >>> df.agg({LIA_FEATURE_NAMES[0]: "min"}).collect()[0][0]
    0.0
    """
    warnings.filterwarnings('ignore')

    # Loop over alerts
    outs = []
    for index in range(len(fid)):
        # Select only valid measurements (not upper limits)
        maskNotNone = np.array(magpsf.values[index]) == np.array(magpsf.values[index])

        # Loop over filters
        out = ''
        for filt in [1, 2]:
            maskFilter = np.array(fid.values[index]) == filt
            m = maskNotNone * maskFilter

            # Reject if less than 10 measurements
            if np.sum(m) < 10:
                out += ','.join(['0'] * len(LIA_FEATURE_NAMES))
                continue

            # Compute DC mag
            mag, err = np.array([
                dc_mag(i[0], i[1], i[2], i[3], i[4], i[5], i[6])
                for i in zip(
                    np.array(fid.values[index])[m],
                    np.array(magpsf.values[index])[m],
                    np.array(sigmapsf.values[index])[m],
                    np.array(magnr.values[index])[m],
                    np.array(sigmagnr.values[index])[m],
                    np.array(magzpsci.values[index])[m],
                    np.array(isdiffpos.values[index])[m])
            ]).T

            # Run the classifier
            output = _extract(mag, err)

            # Update the results
            out += output
        outs.append(out)

    return pd.Series(outs)
Example #41
from warnings import filterwarnings
import pandas as pd
from sklearn.model_selection import train_test_split
from constants import BATCH_SIZE, MAX_SENTENCE_LEN, FILENAME
import numpy as np
from collections import Counter
import random
from dataset import *
import emot
import demoji

filterwarnings('ignore', '.* class will be retired')
np.random.seed(2)
random.seed(2)


def extract_emotion(means):
    meanings = means.split(',')
    return random.choice(meanings)


def clean_sentence(sentence):
    """
    replaces all emojis and emoticons with their text equivalent
    :param sentence: str, raw text
    :return: clean text
    """
    reference = demoji.findall(sentence)
    # print(reference)
    emoticons = emot.emoticons(sentence)
    if isinstance(emoticons, list):
        - val_batches_get_indices & val_batches_by_indices
        - test_batches_get_indices & test_batches_by_indices
    - Added function convert_videos (convert raw videos to small size)
    - Added function update_predictions_at
-------------------------------------------------------------------------------------------------------------------------------
'''

from itertools import islice, cycle
import numpy as np
import os
import pandas as pd
import skvideo.io as skv
import random

from warnings import filterwarnings
filterwarnings('ignore')

from multilabel import multilabel_train_test_split

from scipy.misc import imresize

class Dataset(object):
    
    def __init__(self, datapath, dataset_type='nano', reduce_frames=True, non_blank_oversampling = False, val_size=0.3, batch_size=16, test=False):
        
        self.non_blank_oversampling = non_blank_oversampling
        self.datapath = datapath
        self.dataset_type = dataset_type
        self.reduce_frames = reduce_frames
        self.val_size = val_size
        self.batch_size = batch_size
Example #43
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})

#::: modules
import numpy as np
import matplotlib.pyplot as plt
import os, sys
import warnings
from astropy.time import Time
#import pickle
from tqdm import tqdm
warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning) 
warnings.filterwarnings('ignore', category=np.RankWarning) 

#::: allesfitter modules
from . import config
from .utils import latex_printer
from .computer import update_params,\
                     calculate_model, rv_fct, flux_fct,\
                     calculate_baseline,calculate_stellar_var,\
                     calculate_yerr_w,\
                     flux_subfct_sinusoidal_phase_curves
from .exoworlds_rdx.lightcurves import lightcurve_tools as lct
from .exoworlds_rdx.lightcurves.index_transits import get_tmid_observed_transits
                    
                    
 
Example #44
# pylint: disable=C0103,C0301,C0326,W0703
"""This runner implements the main benchmark for qualitative analysis based on the full training and test sets."""
import os
import warnings
import numpy as np

from MLT.implementations import Autoencoder, HBOS, IsolationForest, LSTM_2_Multiclass, RandomForest, XGBoost

from MLT.metrics import metrics
from MLT.tools import dataset_tools, result_mail, toolbelt

# suppress deprecation warning. sklearn is currently built against an older numpy version.
warnings.filterwarnings(
    module='sklearn*',
    action='ignore',
    category=DeprecationWarning,
    message='The truth value of an empty array is ambiguous*')

# suppress future warning, as this is the responsibility of pyod.
warnings.filterwarnings(
    module='scipy*',
    action='ignore',
    category=FutureWarning,
    message='Using a non-tuple sequence for multidimensional*')


def run_benchmark(train_data, train_labels, test_data, test_labels,
                  result_path, model_savepath, args):
    """Run the full benchmark.

    As this is the full benchmark, it needs a train and a test partition.
Example #45
#!/usr/bin/env python

import warnings

warnings.filterwarnings("ignore", "Motif probs overspecified")
warnings.filterwarnings("ignore", "Model not reversible")

from numpy import ones, dot, array

from cogent import LoadSeqs, DNA, LoadTree, LoadTable
from cogent.evolve.substitution_model import Nucleotide, General, \
                                                GeneralStationary
from cogent.evolve.discrete_markov import DiscreteSubstitutionModel
from cogent.evolve.predicate import MotifChange
from cogent.util.unit_test import TestCase, main
from cogent.maths.matrix_exponentiation import PadeExponentiator as expm

__author__ = "Peter Maxwell and  Gavin Huttley"
__copyright__ = "Copyright 2007-2009, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "GPL"
__version__ = "1.4.1"
__maintainer__ = "Gavin Huttley"
__email__ = "*****@*****.**"
__status__ = "Production"


def _dinuc_root_probs(x, y=None):
    if y is None:
        y = x
    return dict([(n1 + n2, p1 * p2) for n1, p1 in x.items()
Example #46
# -*- coding: utf-8 -*-
"""
Created on Fri May  3 21:15:56 2019

@author: 29132
"""
import pandas as pd
import numpy as np
from functions import *
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
#%%
train_raw = pd.read_csv('./input/train.csv')
test_raw = pd.read_csv('./input/test.csv')
#%%
train = clean_transform_data(train_raw)
test = clean_transform_data(test_raw)
#%%
y = train['Survived']
train.drop(['PassengerId', 'Age', 'Ticket', 'Fare', 'Survived'],
           axis=1,
           inplace=True)
test_PassengerID = test['PassengerId']
test.drop(['PassengerId', 'Age', 'Ticket', 'Fare'], axis=1, inplace=True)
features = pd.concat([train, test]).reset_index(drop=True)
final_features = pd.get_dummies(features).reset_index(drop=True)
X = final_features.iloc[:len(y), :]
X_sub = final_features.iloc[len(y):, :]
#%%
from sklearn.model_selection import train_test_split
Example #47
0
import math
import warnings


def recognize(models: dict, test_set: SinglesData):
    """ Recognize test word sequences from word models set

    :param models: dict of trained models
        {'SOMEWORD': GaussianHMM model object, 'SOMEOTHERWORD': GaussianHMM model object, ...}
    :param test_set: SinglesData object
    :return: (list, list) as probabilities, guesses
        both lists are ordered by the test set word_id
        probabilities is a list of dictionaries where each key is a word and each value is its log likelihood
            [{'SOMEWORD': LogLvalue, 'SOMEOTHERWORD': LogLvalue, ...},
             {'SOMEWORD': LogLvalue, 'SOMEOTHERWORD': LogLvalue, ...},
             ]
        guesses is a list of the best-guess words ordered by the test set word_id
            ['WORDGUESS0', 'WORDGUESS1', 'WORDGUESS2', ...]
    """
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    probabilities = []
    guesses = []
    
    # TODO implement the recognizer
    # return probabilities, guesses
    #raise NotImplementedError

    
    # Loop over the entire test_set
    for word_sequences in test_set.get_all_sequences():
        
        # Initialize an empty dict to store the computed score of the
        # word sequences for each model, keyed by the model's word
        scores = {}

        # Initialize variables to keep track of the best score and corresponding word
        best_score = -math.inf
        word_guess = None

        # Retrieve the sequence list for each word item in the test_set
        X, lengths = test_set.get_item_Xlengths(word_sequences)
            
        # Iterate over the dictionary of trained models keyed by word    
        for word, model in models.items(): 
            
            try:
                # Score the sequences against the current model iteration
                scores[word] = model.score(X, lengths)
                
                # Keep track of the best score and corresponding word
                if best_score < scores[word]:
                    best_score = scores[word] 
                    word_guess=word
                    
            except Exception:
                # The hmmlearn library may not be able to score all models;
                # in case of error, record None and move on to the next iteration
                scores[word] = None

        # Add the scores dictionary for the current word to the probabilities list
        probabilities.append(scores)
        # and append the guessed word to the guesses list
        guesses.append(word_guess)

    return probabilities, guesses
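One caveat about the recognizer above: the filterwarnings call at the top of the function mutates the process-wide filter list every time recognize() runs. A common alternative, sketched below, is to scope the suppression with warnings.catch_warnings so it is undone on exit (model is assumed to be one of the trained GaussianHMM objects from the models dict):

import warnings

def score_quietly(model, X, lengths):
    # Keep the DeprecationWarning filter local to this call instead of
    # installing it globally for the rest of the process.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        return model.score(X, lengths)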
Example #48
0
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import warnings
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt

from parameters import *
import call_model


warnings.filterwarnings(
    action='ignore',
    category=UserWarning,
    module=r'.*TiffImagePlugin'
)

train_datagen = ImageDataGenerator(rescale=1./255,
    validation_split=0.3)

train_generator = train_datagen.flow_from_directory(
    TRAIN_AUG_PATH,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset='training',
    shuffle=True,
    seed=42)
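The module argument used above is also a regular expression, matched from the start against the __name__ of the module that actually issues the warning (hence the leading '.*' to reach what is presumably Pillow's PIL.TiffImagePlugin). A minimal sketch of the matching rule, using '__main__' because that is the issuing module's name when a script is run directly:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    warnings.filterwarnings(
        action='ignore',
        category=UserWarning,
        module=r'__main__')
    warnings.warn('Corrupt EXIF data', UserWarning)   # issued from __main__

assert caught == []   # swallowed by the module filter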
Example #49
0
import sys
import threading
import traceback
import warnings

from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_imports import xmlrpclib, _queue
from _pydevd_bundle.pydevd_constants import Null, IS_PY3K

Queue = _queue.Queue

#This may happen in IronPython (in Python it shouldn't happen as there are
#'fast' replacements that are used in xmlrpclib.py)
warnings.filterwarnings(
    'ignore', 'The xmllib module is obsolete.*', DeprecationWarning)


file_system_encoding = getfilesystemencoding()

#=======================================================================================================================
# _ServerHolder
#=======================================================================================================================
class _ServerHolder:
    '''
    Helper so that we don't have to use a global here.
    '''
    SERVER = None


#=======================================================================================================================
# set_server
Example #50
0
def remove_warnings():
    import warnings
    warnings.filterwarnings(
        'ignore', module='floppyforms',
        message='Unable to import floppyforms.gis'
    )
Example #51
0
import urllib2
import bs4
from bs4 import BeautifulSoup
import datetime
import sys
import warnings

warnings.filterwarnings("ignore", category=UserWarning, module='bs4')

urlis = "http://www.moneycontrol.com/sensex/bse/sensex-live"
page = urllib2.urlopen(urlis)
det_page = BeautifulSoup(page, 'html.parser')

now = datetime.datetime.now()
name = det_page.find('div', class_="FL r_35")
name2 = det_page.find('div', class_="FL r_20 PT10 MT3")
nameperc = det_page.find('div', class_="FL r_15 PT10 MT3 PL5")
#date=det_page.find('div',class_="gL_10_5 PT3")
tab = det_page.find('div', class_='FL PR10').find('table',
                                                  class_='tbldtldata b_15')

print "Todys Date :" + str(now)
print "sensex  : ", str(name.text)
print "sensex change : " + str(name2.text)
print "Percentage change :" + str(nameperc.text) + " %"

a = []
b = []
c = []
for data in tab.find_all("tr"):
    col = data.find_all("td")
Example #52
0
    def __init__(self, n_turns, ring_length, alpha, momentum, 
                 particle_type, user_mass = None, user_charge = None, 
                 particle_type_2 = None, user_mass_2 = None, 
                 user_charge_2 = None, number_of_sections = 1, force_beta_equal_one = False):
        
        #: | *Number of RF sections over the ring; default is one.*
        self.n_sections = number_of_sections
        
        #: | *Particle type. Recognized types are: 'proton' and 'electron'.*
        #: | *Use 'user_input' to input mass and charge manually.*
        #: | *Input particle mass in [eV]* :math:`: \quad m`
        #: | *Input particle charge in [e]* :math:`: \quad q`
        self.particle_type = particle_type
        
        #: *Second particle type: optional; does not affect the momentum, 
        #: energy, beta, and gamma.*
        self.particle_type_2 = particle_type_2
        
        # Attribution of mass and charge with respect to particle_type
        if self.particle_type == 'proton':
            self.mass = m_p*c**2/e # [eV]
            self.charge = 1. # [e]
        elif self.particle_type == 'electron':
            self.mass = m_e*c**2/e # [eV]
            self.charge = -1. # [e]
        elif self.particle_type == 'user_input':
            self.mass = user_mass # [eV]
            self.charge = user_charge # [e]
        else:
            raise RuntimeError('ERROR: Particle type not recognized!')

        if self.particle_type_2 is None:
            pass
        elif self.particle_type_2 == 'proton':
            self.mass2 = m_p*c**2/e # [eV]
            self.charge2 = 1. # [e]
        elif self.particle_type_2 == 'electron':
            self.mass2 = m_e*c**2/e # [eV]
            self.charge2 = -1. # [e]
        elif self.particle_type_2 == 'user_input':
            self.mass2 = user_mass_2 # [eV]
            self.charge2 = user_charge_2 # [e]
        else:
            raise RuntimeError('ERROR: Second particle type not recognized!')
        
        #: *Number of turns of the simulation*
        self.n_turns = n_turns 
        
        if type(momentum)==tuple:
            self.momentum = np.array(momentum[1], ndmin = 2)
            self.cumulative_times = momentum[0]
        else:
            #: | *Synchronous momentum (program) in [eV] for each RF section* :math:`: \quad p_{s,k}^n`
            #: | *Can be given as a single constant value, or as a program of (n_turns + 1) turns.*
            #: | *In case of several sections without acceleration, input: [[momentum_section_1], [momentum_section_2]]*
            #: | *In case of several sections with acceleration, input: [momentum_program_section_1, momentum_program_section_2]*
            self.momentum = np.array(momentum, ndmin = 2)

        #: | *Momentum compaction factor (up to 2nd order) for each RF section* :math:`: \quad \alpha_{k,i}`
        #: | *Should be given as a list for multiple RF stations (each element of the list should be a list of alpha factors up to 2nd order)*
        self.alpha = np.array(alpha, ndmin = 2) 
        
        #: *Number of orders for the momentum compaction*
        self.alpha_order = self.alpha.shape[1]

        #: | *Ring length contains the length of the RF sections, in [m]*
        #: | *Should be given as a list for multiple RF stations*
        self.ring_length = ring_length
        if isinstance(self.ring_length, float) or isinstance(self.ring_length, int):
            self.ring_length = [self.ring_length]
        
        #: | *Ring circumference is the sum of section lengths* :math:`: \quad C = \sum_k L_k`
        self.ring_circumference = np.sum(self.ring_length)
        
        #: *Ring radius in [m]* :math:`: \quad R`
        self.ring_radius = self.ring_circumference/(2*np.pi)         
        
        # Check consistency of input data; raise error if not consistent
        
        if self.n_sections != len(self.ring_length) or \
           self.n_sections != self.alpha.shape[0] or \
           self.n_sections != self.momentum.shape[0]:
            raise RuntimeError('ERROR: Number of sections, ring length, alpha,'+
                               ' and/or momentum data do not match!')    
        
        if self.n_sections > 1:
            if self.momentum.shape[1] == 1:
                self.momentum = self.momentum*np.ones(self.n_turns + 1)
        else:
            if self.momentum.size == 1:
                self.momentum = self.momentum*np.ones(self.n_turns + 1)

        if self.momentum.shape[1] != self.n_turns + 1:
            raise RuntimeError('The input momentum program does not match' +
                               ' the proper length (n_turns+1)')
            
        
        #: *Synchronous relativistic beta (program)* :math:`: \quad \beta_{s,k}^n`
        #:
        #: .. math:: \beta_s = \frac{1}{\sqrt{1 + \left(\frac{m}{p_s}\right)^2} }
        
        self.beta = np.sqrt(1/(1 + (self.mass/self.momentum)**2))
        if force_beta_equal_one:
            self.beta = np.array([np.ones(self.n_turns + 1)])
        
        #: *Synchronous relativistic gamma (program)* :math:`: \quad \gamma_{s,k}^n`
        #:
        #: .. math:: \gamma_s = \sqrt{ 1 + \left(\frac{p_s}{m}\right)^2 }
        self.gamma = np.sqrt(1 + (self.momentum/self.mass)**2) 
        
        #: *Synchronous total energy (program) in [eV]* :math:`: \quad E_{s,k}^n`
        #:
        #: .. math:: E_s = \sqrt{ p_s^2 + m^2 }
        self.energy = np.sqrt(self.momentum**2 + self.mass**2)
        
        #: *Synchronous kinetic energy (program) in [eV]*
        #:
        #: .. math:: E_s^{kin} = \sqrt{ p_s^2 + m^2 } - m
        self.kin_energy = np.sqrt(self.momentum**2 + self.mass**2) - self.mass
        
        # Be careful that self.cycle_time in the else statement starts always with 0.
        if type(momentum)==tuple:
            #: *Cumulative times [s] taken from preprocess ramp method*
            self.cycle_time = self.cumulative_times
            #: *Revolution period [s]* :math:`: \quad T_0 = \frac{C}{\beta_s c}`
            self.t_rev = np.append(np.diff(self.cycle_time),self.ring_circumference/(self.beta[0][-1]*c))
        else:    
            self.t_rev = np.dot(self.ring_length, 1/(self.beta*c))
            self.cycle_time = np.cumsum(self.t_rev)
            
        #: *Revolution frequency [Hz]* :math:`: \quad f_0 = \frac{1}{T_0}`
        self.f_rev = 1/self.t_rev
         
        
        #: *Revolution angular frequency [1/s]* :math:`: \quad \omega_0 = 2\pi f_0`
        self.omega_rev = 2*np.pi*self.f_rev
        
        #: *Slippage factor (0th order)* :math:`: \quad \eta_{k,0}`
        #:
        #: .. math:: \eta_0 = \alpha_0 - \frac{1}{\gamma_s^2}
        self.eta_0 = 0
        
        #: *Slippage factor (1st order)* :math:`: \quad \eta_{k,1}`
        #:
        #: .. math:: \eta_1 = \frac{3\beta_s^2}{2\gamma_s^2} + \alpha_1 - \alpha_0\eta_0
        self.eta_1 = 0
        
        #: *Slippage factor (2nd order)* :math:`: \quad \eta_{k,2}`
        #:
        #: .. math:: \eta_2 = -\frac{\beta_s^2\left(5\beta_s^2-1\right)}{2\gamma_s^2} + \alpha_2 - 2\alpha_0\alpha_1 + \frac{\alpha_1}{\gamma_s^2} + \alpha_0^2\eta_0 - \frac{3\beta_s^2\alpha_0}{2\gamma_s^2}
        self.eta_2 = 0
        
        # Warning that higher orders for alpha will not be used
        if self.alpha_order > 3:
            warnings.filterwarnings("once")
            warnings.warn("WARNING: Momentum compaction factor is implemented "
                          "only up to 2nd order")
            self.alpha_order = 3
                 
        # Processing the slippage factor
        self.eta_generation()
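The filterwarnings("once") call near the end of the constructor relies on the 'once' action, which reports a given warning message only the first time it is seen, so instantiating the class repeatedly does not flood the log. A minimal sketch of that behaviour in isolation:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('once')
    for _ in range(5):
        warnings.warn('Momentum compaction factor is implemented only up to 2nd order')

print(len(caught))   # 1 -- repeats of the same message are dropped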
Example #53
0
    def test01_basics(self):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_basics..." % self.__class__.__name__

        self.populateDB(self.d)
        self.d.sync()
        self.do_close()
        self.do_open()
        d = self.d

        l = len(d)
        k = d.keys()
        s = d.stat()
        f = d.fd()

        if verbose:
            print "length:", l
            print "keys:", k
            print "stats:", s

        self.assertEqual(0, d.has_key(self.mk('bad key')))
        self.assertEqual(1, d.has_key(self.mk('IA')))
        self.assertEqual(1, d.has_key(self.mk('OA')))

        d.delete(self.mk('IA'))
        del d[self.mk('OA')]
        self.assertEqual(0, d.has_key(self.mk('IA')))
        self.assertEqual(0, d.has_key(self.mk('OA')))
        self.assertEqual(len(d), l - 2)

        values = []
        for key in d.keys():
            value = d[key]
            values.append(value)
            if verbose:
                print "%s: %s" % (key, value)
            self.checkrec(key, value)

        dbvalues = d.values()
        self.assertEqual(len(dbvalues), len(d.keys()))
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    'comparing unequal types not supported',
                                    DeprecationWarning)
            self.assertEqual(sorted(values), sorted(dbvalues))

        items = d.items()
        self.assertEqual(len(items), len(values))

        for key, value in items:
            self.checkrec(key, value)

        self.assertEqual(d.get(self.mk('bad key')), None)
        self.assertEqual(d.get(self.mk('bad key'), None), None)
        self.assertEqual(d.get(self.mk('bad key'), 'a string'), 'a string')
        self.assertEqual(d.get(self.mk('bad key'), [1, 2, 3]), [1, 2, 3])

        d.set_get_returns_none(0)
        self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
        d.set_get_returns_none(1)

        d.put(self.mk('new key'), 'new data')
        self.assertEqual(d.get(self.mk('new key')), 'new data')
        self.assertEqual(d[self.mk('new key')], 'new data')
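This snippet, like Example #45, passes the filter positionally; the positional order of warnings.filterwarnings is (action, message, category, module, lineno, append), so the string in second position is always the message regex. A minimal sketch showing that the positional and keyword forms are equivalent:

import warnings

# Positional form: action, message, category
warnings.filterwarnings('ignore', 'comparing unequal types not supported',
                        DeprecationWarning)

# The same filter spelled with keywords
warnings.filterwarnings(action='ignore',
                        message='comparing unequal types not supported',
                        category=DeprecationWarning)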
Example #54
0
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import numpy as np
import input_data
from sklearn.utils import shuffle
import warnings
warnings.filterwarnings('error')

np.random.seed(9999)
torch.manual_seed(9999)

mnist = input_data.read_data_sets('../MNIST_data', one_hot=False)

X_test = mnist.test.images
t_test = mnist.test.labels


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()

        self.fc1 = nn.Linear(784, 200, bias=False)
        self.fc2 = nn.Linear(200, 100, bias=False)
        self.fc3 = nn.Linear(100, 10, bias=False)

        self.W = [self.fc1.weight, self.fc2.weight, self.fc3.weight]

    def forward(self, x):
        a1 = self.fc1(x)
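warnings.filterwarnings('error') at the top of this script escalates every warning to an exception, which is a common way to make silent numerical problems in a training loop fail loudly. A minimal sketch of the effect, using a numpy operation that normally only warns:

import warnings

import numpy as np

warnings.filterwarnings('error')

try:
    np.log(np.array([0.0]))   # normally just emits "RuntimeWarning: divide by zero"
except RuntimeWarning as exc:
    print('caught as an exception:', exc)

In a larger program it is usually safer to scope this with warnings.catch_warnings() so that imported library code is not affected as well.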
Example #55
0
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).

It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.

See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings


# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^{0}\.'.format(re.escape(__name__)))

# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
#   X.Y
#   X.Y.Z   # For bugfix releases
#
# Admissible pre-release markers:
#   X.YaN   # Alpha release
#   X.YbN   # Beta release
#   X.YrcN  # Release Candidate
#   X.Y     # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
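The 'always' action above is the interesting part: it defeats the default de-duplication, so a DeprecationWarning raised repeatedly from inside the package is shown on every occurrence rather than only the first time per call site. A minimal sketch contrasting it with the default behaviour:

import warnings

def count(action):
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter(action, DeprecationWarning)
        for _ in range(3):
            warnings.warn('this API is deprecated', DeprecationWarning)
    return len(caught)

print(count('default'), count('always'))   # 1 3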

Example #56
0
# script to decrypt
import time
from datetime import datetime,date
import mysql.connector
from mysql.connector import Error
import warnings
import redis

warnings.filterwarnings('ignore', message='Unverified HTTPS request')
import requests
import json
from requests.exceptions import Timeout
from requests.auth import HTTPBasicAuth
import pymongo
import urllib.parse
username = urllib.parse.quote_plus('datas')
password=urllib.parse.quote_plus('Data@321')
uri = "mongodb://%s:%[email protected]:27017/admin" % (
    username, password)
client = pymongo.MongoClient(uri)
db = client.oppoAnalytics

headers = {'Content-type': 'application/json','oppokey':"b3Bwb2thc2hlYWRlcjpwYXNzd29yZG9wcG9rYXNo"}
url = 'https://analyticsapi.realmepaysa.com/cms/users/decryptDataoppo'
try:

    connection = mysql.connector.connect(host='10.100.20.55',
                                        port='23306',
                                        user='******',
Example #57
0
import tensorflow as tf
import numpy as np
import os
import datetime
import time
#from text_cnn import TextCNN
import data_helpers_ch_between_before_after as data_helpers
from sklearn.metrics import f1_score
import warnings
import sklearn.exceptions
from sklearn.ensemble import RandomForestClassifier
warnings.filterwarnings("ignore",
                        category=sklearn.exceptions.UndefinedMetricWarning)

# Parameters
# ==================================================

# Data loading params
tf.flags.DEFINE_string("train_dir", "./data/TRAIN_FILE.txt",
                       "Path of train data")
tf.flags.DEFINE_float("dev_sample_percentage", .1,
                      "Percentage of the training data to use for validation")
tf.flags.DEFINE_integer(
    "max_sentence_length", 100,
    "Max sentence length in train(98)/test(70) data (Default: 100)")

# Model Hyperparameters
tf.flags.DEFINE_string("word2vec", None,
                       "Word2vec file with pre-trained embeddings")
tf.flags.DEFINE_integer("text_embedding_dim", 300,
                        "Dimensionality of word embedding (Default: 300)")
Example #58
0
import os
import sys; sys.path.append('./..')
import pickle
import networkx as nx # requires 2.3.0
import pandas as pd; pd.options.display.float_format = '{:,.2f}'.format
import statsmodels.stats.api as sm
import warnings; warnings.filterwarnings("ignore", category=UserWarning)
from glob import glob
from statistics import median_low

from src.Tree import TreeNode
from src.Tree import LightTreeNode
from src.utils import load_pickle
from src.graph_stats import GraphStats
from src.graph_comparison import GraphPairCompare

#graphs = ['eucore', 'clique-ring-500-4', 'flights', 'tree', 'chess']
def main(base_path, dataset, models):
    if 'GraphRNN' in models:
        #path = os.path.join(base_path, 'GraphRNN')
        #for subdir, dirs, files in os.walk(path):
        #    if dataset == subdir.split('/')[-1].split('_')[0]:
        #        print(subdir)
        #        for filename in files:
        #            print(filename)
        models.remove('GraphRNN')
    for model in models:
        path = os.path.join(base_path, dataset, model)
        for subdir, dirs, files in os.walk(path):
            for filename in files:
                if 'seq' not in filename:
Example #59
0
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
           'current_thread', 'enumerate', 'Event',
           'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
           'Timer', 'setprofile', 'settrace', 'local', 'stack_size']

_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread


# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
                        module='threading', message='sys.exc_clear')

# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose.  We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).

_VERBOSE = False

if __debug__:

    class _Verbose(object):

        def __init__(self, verbose=None):
            if verbose is None:
Example #60
0
import os
import warnings

import scipy
import numpy as np
from sklearn.exceptions import UndefinedMetricWarning

from .classification_report_utils import classification_report

warnings.filterwarnings("ignore", category=UndefinedMetricWarning)


def f1_report_dict_to_str(f1_report, label_names):
    """
    Returns the report string from the f1_report dict.

    Example Output:
                      precision    recall  f1-score   support

         class 0       0.00      0.00      0.00         1
         class 1       1.00      0.67      0.80         3

       micro avg       0.67      0.50      0.57         4
       macro avg       0.50      0.33      0.40         4
    weighted avg       0.75      0.50      0.60         4

    Note: this is generally taken from the `classification_report` function
    inside sklearn.
    :param f1_report: f1 report dictionary from sklearn
    :type f1_report: dict
    :param label_names: names of labels included in the report