Example #1
def pack_vectors(problem, **kwargs):

    items = problem.get('items', None)
    bins = problem.get('bins', None)

    heuristic_name = kwargs.get('heuristic', None)
    item_measure_name = kwargs.get('item_measure', None)
    bin_measure_name = kwargs.get('bin_measure', None)
    single = kwargs.get('single', False)

    heuristic = getattr(heuristics, heuristic_name)
    item_measure = getattr(measures, item_measure_name)
    bin_measure = getattr(measures, bin_measure_name)

    item_objects = [IndexedItem(r, i) for i, r in enumerate(items)]
    bin_objects = [IndexedBin(c, i) for i, c in enumerate(bins)]

    # FIXME: single for balance?
    start_time = time.process_time()
    failed = heuristic(item_objects, bin_objects, item_measure, bin_measure)
    stop_time = time.process_time()

    mapping = [None] * len(items)

    # FIXME: probably a more pythonic way to do this...
    for bin_object in bin_objects:
        for item_object in bin_object.items:
            mapping[item_object.index] = bin_object.index

    # FIXME: method names?
    return {
        'mapping' : mapping,
        'algo-runtime' : stop_time - start_time,
    }
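The FIXME above asks for a more Pythonic way to build the item-to-bin mapping. Below is a minimal sketch of one alternative, assuming IndexedItem exposes .index and IndexedBin exposes .index and .items exactly as pack_vectors uses them; the helper name build_mapping is hypothetical.

def build_mapping(item_count, bin_objects):
    """Return mapping[item_index] -> bin_index via a dict comprehension."""
    assignment = {
        item_object.index: bin_object.index
        for bin_object in bin_objects
        for item_object in bin_object.items
    }
    # Items the heuristic failed to place stay None, matching the original loop.
    return [assignment.get(i) for i in range(item_count)]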
Example #2
def perfTest(command, host, port):
    global exchange_size
    global tls_exchange_over
    # capturing network packets to get the overall size of the TCP exchange
    capture = Thread(target=sniff_network, args=(host, port))
    # Making the thread a daemon forces it to quit when all other threads have exited.
    capture.daemon = True
    capture.start()

    # Thread needs a bit of time
    time.sleep(1)
    # Start the timers
    sysstart = time.perf_counter()
    start = time.process_time()
    #Run the command !
    #client_process = subprocess.Popen(command.split(), shell=False)
    #client_process.wait()
    os.system(command)
    # stop the timers
    stop = time.process_time()
    sysstop = time.perf_counter()

    tls_exchange_over = True

    stats = {'Time (ms)': (sysstop-sysstart)*1000, 'CPU time (ms)': (stop-start)*1000}
    # cpu usage = processTime / System-wide time
    stats['cpu usage (%)'] = (stats['CPU time (ms)']/stats['Time (ms)'])*100

    # if capturing on localhost, divide the size by 2 because each packet is sniffed twice.
    if host == "127.0.0.1":
        exchange_size /= 2
    stats['TCP size (bytes)'] = exchange_size
    return stats
Example #3
 def wrapper(*args, **kwargs):
     t = time.process_time()
     result = func(*args, **kwargs)
     elapsed_time = time.process_time() - t
     logger.info('function %s executed time: %f s'
                 % (func.__name__, elapsed_time))
     return result
Example #4
    def evaluate(self, data, labels, site, sess=None):
        """
        Runs one evaluation against the full epoch of data.
        Return the precision and the number of correct predictions.
        Batch evaluation saves memory and enables this to run on smaller GPUs.

        sess: the session in which the model has been trained.
        op: the Tensor that returns the number of correct predictions.
        data: size N x M
            N: number of signals (samples)
            M: number of vertices (features)
        labels: size N
            N: number of signals (samples)
        """
        t_process, t_wall = time.process_time(), time.time()
        scores, loss = self.predict(data, labels, site, sess)

        fpr, tpr, _ = roc_curve(labels, scores)
        roc_auc = auc(fpr, tpr)

        string = 'samples: {:d}, AUC : {:.2f}, loss: {:.4e}'.format(len(labels), roc_auc, loss)

        if sess is None:
            string += '\ntime: {:.0f}s (wall {:.0f}s)'.format(time.process_time() - t_process, time.time() - t_wall)
        return string, roc_auc, loss, scores
Example #5
File: utils.py Project: m-kuhn/QGIS
def startProcessingPlugin(packageName):
    """ initialize only the Processing components of a plugin """
    global plugins, active_plugins, iface, plugin_times
    start = time.process_time()
    if not _startPlugin(packageName):
        return False

    errMsg = QCoreApplication.translate("Python", "Couldn't load plugin '{0}'").format(packageName)
    if not hasattr(plugins[packageName], 'initProcessing'):
        del plugins[packageName]
        _unloadPluginModules(packageName)
        msg = QCoreApplication.translate("Python", "{0} - plugin has no initProcessing() method").format(errMsg)
        showException(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2], msg, messagebar=True)
        return False

    # initProcessing
    try:
        plugins[packageName].initProcessing()
    except:
        del plugins[packageName]
        _unloadPluginModules(packageName)
        msg = QCoreApplication.translate("Python", "{0} due to an error when calling its initProcessing() method").format(errMsg)
        showException(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2], msg, messagebar=True)
        return False

    end = time.process_time()
    _addToActivePlugins(packageName, end - start)

    return True
Example #6
def solve(impl='python'):
    if impl == 'cython':
        solvercls = csolver.CBruteSolver
    else:
        solvercls = solver.BruteSolver
    try:
        os.mkdir('data/' + impl)
    except FileExistsError:
        pass
    for filename in sorted(glob.glob('data/*.inst.dat')):
        print(filename)
        loaded_data = list(dataloader.load_input(filename))
        count = loaded_data[0]['count']
        correct = list(dataloader.load_provided_results(
            'data/knap_{0:02d}.sol.dat'.format(count)))
        outname = filename.replace('.inst.dat', '.results.jsons')
        outname = outname.replace('data/', 'data/' + impl + '/')
        with open(outname, 'w') as f:
            filestartime = time.process_time()
            for idx, backpack in enumerate(loaded_data):
                startime = time.process_time()
                s = solvercls(backpack)
                backpack['maxcombo'], backpack['maxcost'] = s.solve()
                endtime = time.process_time()
                delta = endtime - startime
                backpack['time'] = delta
                assert backpack['maxcost'] == correct[idx]['maxcost']
                del backpack['items']
                f.write(json.dumps(backpack) + '\n')
            fileendtime = time.process_time()
            delta = fileendtime - filestartime
            f.write('{}\n'.format(delta))
Example #7
    def fit(self, train_data, train_labels, val_data, val_labels):
        t_process, t_wall = time.process_time(), time.time()
        sess = tf.Session(graph=self.graph)
        shutil.rmtree(self._get_path('summaries'), ignore_errors=True)
        writer = tf.summary.FileWriter(self._get_path('summaries'), self.graph)
        shutil.rmtree(self._get_path('checkpoints'), ignore_errors=True)
        os.makedirs(self._get_path('checkpoints'))
        path = os.path.join(self._get_path('checkpoints'), 'model')
        sess.run(self.op_init)

        # Training.
        accuracies = []
        losses = []
        indices = collections.deque()
        num_steps = int(self.num_epochs * train_data.shape[0] / self.batch_size)
        for step in range(1, num_steps+1):

            # Be sure to have used all the samples before using one a second time.
            if len(indices) < self.batch_size:
                indices.extend(np.random.permutation(train_data.shape[0]))
            idx = [indices.popleft() for i in range(self.batch_size)]

            batch_data, batch_labels = train_data[idx, :, :, :], train_labels[idx]
            if type(batch_data) is not np.ndarray:
                batch_data = batch_data.toarray()  # convert sparse matrices
            feed_dict = {self.ph_data: batch_data, self.ph_labels: batch_labels, self.ph_dropout: self.dropout}
            learning_rate, loss_average = sess.run([self.op_train, self.op_loss_average], feed_dict)

            # Periodical evaluation of the model.
            if step % self.eval_frequency == 0 or step == num_steps:
                epoch = step * self.batch_size / train_data.shape[0]
                print('step {} / {} (epoch {:.2f} / {}):'.format(step, num_steps, epoch, self.num_epochs))
                print('  learning_rate = {:.2e}, loss_average = {:.2e}'.format(learning_rate, loss_average))

                string, auc, loss, scores_summary = self.evaluate(train_data, train_labels, sess)
                print('  training {}'.format(string))

                string, auc, loss, scores_summary = self.evaluate(val_data, val_labels, sess)
                print('  validation {}'.format(string))
                print('  time: {:.0f}s (wall {:.0f}s)'.format(time.process_time()-t_process, time.time()-t_wall))

                accuracies.append(auc)
                losses.append(loss)

                # Summaries for TensorBoard.
                summary = tf.Summary()
                summary.ParseFromString(sess.run(self.op_summary, feed_dict))
                summary.value.add(tag='validation/auc', simple_value=auc)
                summary.value.add(tag='validation/loss', simple_value=loss)
                writer.add_summary(summary, step)
                
                # Save model parameters (for evaluation).
                self.op_saver.save(sess, path, global_step=step)

        print('validation accuracy: peak = {:.2f}, mean = {:.2f}'.format(max(accuracies), np.mean(accuracies[-10:])))
        writer.close()
        sess.close()
        
        t_step = (time.time() - t_wall) / num_steps
        return accuracies, losses, t_step, scores_summary
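The comment "Be sure to have used all the samples before using one a second time" describes the deque-based index sampling used in fit above. A standalone sketch of that pattern, with illustrative names (batch_indices, num_samples), is shown below.

import collections
import numpy as np

def batch_indices(num_samples, batch_size, num_batches):
    """Yield batches of indices; every sample is used once per epoch before any repeat."""
    indices = collections.deque()
    for _ in range(num_batches):
        if len(indices) < batch_size:
            indices.extend(np.random.permutation(num_samples))
        yield [indices.popleft() for _ in range(batch_size)]

# Example: 10 samples, batches of 4 -- no index repeats until all 10 are consumed.
for idx in batch_indices(10, 4, 5):
    print(idx)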
Example #8
    def plan(self):
        """
        Compute the cost grid based on the map represented in the occupancy grid.

        :return: none
        """

        self.total_plan_steps += 1
        start_time = time.process_time()

        self.compute_shortest_path()

        x = int(self.robot.get_cell_x())
        y = int(self.robot.get_cell_y())

        # When there has been a change to the plan rebuild the path.
        self.path = []

        try:
            self.build_path(x, y)
        except RuntimeError as err:
            if str(err) == "maximum recursion depth exceeded in comparison":
                raise NoPathException("No path to Goal!")

        self.time_taken += round(time.process_time() - start_time, 3)
Example #9
def diameter_homegrown(graph, weights=None):
    """Compute diameter approximation and time needed to compute it.

    Return a tuple (elapsed_time, diam), where elapsed_time is the time (in
    fractional seconds) needed to compute the approximation to the diameter of
    the graph.

    To compute the approximation, we sample a vertex uniformly at random,
    compute the shortest paths from this vertex to all other vertices, and sum
    the lengths of the two longest paths we found. The returned value is an
    upper bound to the diameter of the graph and is at most 2 times the exact
    value.

    If sample_path is True, sample one of the shortest paths computed for the
    approximation, and set it as graph attribute.

    Homegrown version
    
    """
    logging.info("Computing diameter approximation with igraph implementation")
    # time.process_time() does not account for sleeping time. Seems the right
    # function to use. Alternative could be time.perf_counter()
    start_time = time.process_time()
    diam = graph.diameter_approximation(weights)
    end_time =  time.process_time()
    elapsed_time = end_time - start_time

    logging.info("Diameter approximation is %d, computed in %f seconds", diam, elapsed_time)
    graph["approx_diam"] = diam
    graph["approx_diam_time"] = elapsed_time
    return (elapsed_time, diam)
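The docstring above describes the sampling-based approximation, but the heavy lifting is hidden inside graph.diameter_approximation (an igraph-based method not shown here). A minimal sketch of the same idea for an unweighted networkx graph, with approx_diameter as an illustrative name:

import random
import networkx as nx

def approx_diameter(G):
    """Upper bound on the diameter from one uniformly sampled source vertex."""
    source = random.choice(list(G.nodes))
    lengths = sorted(nx.single_source_shortest_path_length(G, source).values())
    # Sum of the two longest shortest paths found from the sampled source;
    # this is at most twice the exact diameter.
    return lengths[-1] + lengths[-2] if len(lengths) > 1 else 0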
Example #10
 def inner(self, *args, **kwargs):
     start_time = time.process_time()
     result = fun(self, *args, **kwargs)
     elapsed_sec = round(time.process_time() - start_time, 2)
     msg = self.function.__name__ if hasattr(self, 'function') else self.__class__.__name__
     click.secho('Finished {} in {} sec'.format(msg, elapsed_sec), fg='yellow')
     return result
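Examples #3 and #10 show only the inner wrapper of a timing decorator; the enclosing decorator and functools.wraps are omitted. A self-contained sketch of the full pattern, with the decorator name timed chosen here for illustration:

import functools
import logging
import time

logger = logging.getLogger(__name__)

def timed(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        t = time.process_time()
        result = func(*args, **kwargs)
        logger.info('function %s executed time: %f s',
                    func.__name__, time.process_time() - t)
        return result
    return wrapper

@timed
def busy(n):
    return sum(i * i for i in range(n))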
Example #11
def sudoku_driver(sudoku, expectedSoln=None):
    """
    Driver method that runs the solver, input: unsolved sudoku.
    Optional: expectedSoln, a solution for correctness
    Prints the Original, then the Solution, and Elapsed process_time.
    Raises a ValueError if no solution can be found.
    Note:
        Add a False as an argument for Problem constructor if you
        do not want pruning. e.g Problem(sudoku, False)
    """

    t = time.process_time()

    print("Original Sudoku:\n%s" % printNestedList(sudoku))

    solutionNode = breadth_first_search(Problem(sudoku))

    if solutionNode is None:
        raise(ValueError("No valid soln found."))

    print("Final Solved Sudoku:\n%s" % printNestedList(sudoku))

    print("Solution Branch (upwards from child -> parent): ", end="")
    ptrNode = solutionNode
    while ptrNode.state != 0:
        print(ptrNode.state, " ", end="")
        ptrNode = ptrNode.parent

    print("\nElapsed time for soln: ", time.process_time() - t)
    if expectedSoln is not None:
        assert(sudoku == expectedSoln)
        print("Solution Matches Expected Solution! \n")
Example #12
def main(args):
    running = True

    # Initialize the object store.
    store = objstore.ObjStore()

    # XXX: I don't know if this is a good method for measuring time delay; it
    # may only count process time, not including sleeps.
    curtime = time.process_time()

    while running:
        newtime = time.process_time()
        timediff = newtime - curtime
        curtime = newtime

        # store.changed() returns an iterator, so materialize it into a list
        # before taking its length.
        changed = list(store.changed())
        if len(changed):
            print("Changed: %d" % len(changed))

        if random.random() < 0.000001:
            print("Adding new object")
            obj.RealObj(1, 1, 1, objstore=store)

    return 0
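The XXX comment above is right to be cautious: time.process_time() measures CPU time only, so it does not advance while the process sleeps or waits. A quick check of the difference against time.perf_counter():

import time

cpu0, wall0 = time.process_time(), time.perf_counter()
time.sleep(0.5)
print('cpu  :', time.process_time() - cpu0)   # close to 0 -- sleeping is not CPU time
print('wall :', time.perf_counter() - wall0)  # roughly 0.5 s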
Example #13
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("dataset", help="Path to graph dataset (.gml format)")

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-k", "--kgroups", type=int, help="Number of groups to generate. Random Sources")
    group.add_argument(
        "-s", "--sources", help="Shortest Path sources. Comma separeted. Ex: Brighton,Edinburgh", default=""
    )

    parser.add_argument("-v", "--verbose", action="store_true", help="Show all vertices value")
    parser.add_argument("-t", "--timeit", action="store_true", help="Print execution time of chosen method")
    parser.add_argument("-p", "--plot", action="store_true", help="Plot the graphs generated")

    args = parser.parse_args()
    args.sources = args.sources.split(",")

    graph = ssp_classification.SSPClassification(args.dataset)

    t = process_time()
    grouped_graph = graph.extract_ssp(args.sources, args.kgroups)
    elapsed_time = process_time() - t

    if args.timeit:
        print("Time: %.5f seconds" % elapsed_time)

    print("Groups formed:")
    for x in nx.connected_components(grouped_graph):
        print(x)

    if args.plot:
        ssp_classification.plot(graph.graph, 1, "Graph")
        ssp_classification.plot(grouped_graph, 2, "Grouped Graph")
        plt.show()
Example #14
    def performance_and_error_calculation_naive(self, dataarray):
        etimenaive = np.zeros(self.num_estimations + 2, dtype=float)
        etimenaive[0] = 0
        err = np.zeros(self.num_estimations + 1, dtype=float)
        for k in range(0, self.num_estimations + 1):
            current_time = self.window_size + k * self.shift - 1
            assert (current_time < np.shape(dataarray)[1])
            t0 = process_time()
            data0 = dataarray[:, current_time - self.window_size + 1: (current_time + 1)]
            dataslice0 = []

            for i in range(0, self.num_trajectories):
                dataslice0.append(data0[i, :])

            C0 = estimate_via_sliding_windows(data=dataslice0,
                                              num_states=Variable_Holder.num_states,
                                              initial=True)  # count matrix for whole window
            A0 = _tm(C0)
            t1 = process_time()

            etimenaive[k + 1] = t1 - t0 + etimenaive[k]
            err[k] = np.linalg.norm(A0 - self.qmm1_0_0_scaled.eval(k).trans)

        log_total_time_naive = Utility.log_value(etimenaive[-1])
        log_avg_err_naive = Utility.log_value(sum(err) / len(err))

        return log_total_time_naive, log_avg_err_naive
Example #15
    def calculate_weights(self, threshold=None, p=2, *args, **kwargs):
        """
        Parameters
        ----------
        threshold  : float
                     distance band
        p          : float
                     Minkowski p-norm distance metric parameter:
                     1<=p<=infinity
                     2: Euclidean distance
                     1: Manhattan distance
        """
        if threshold is None:
            if hasattr(self, 'threshold'):
                threshold = self.threshold
            else:
                raise ValueError("Must set threshold first")
        logging.warning('{}: Threshold = {}'.format(self.name, threshold))
        logging.info('{}: Starting weight calculation'.format(self.name))
        t = time.process_time()

        self.weights = pysal.DistanceBand(
            self.points_array, threshold=threshold, p=p, *args, **kwargs)

        logging.debug('{}: Weight calculation elapsed time {}'.format(
            self.name, str(timedelta(seconds=time.process_time() - t))))
        return self.weights
Example #16
    def _schedule_processes(self, tasklist, _worker):
        # Reset the global flag that allows all running processes to be stopped.
        global _stop_all_processes
        _subprocess_container.stop_all = False
        # Make a shallow copy of the task list,
        # so we don't mess with the callers list.
        tasklist = copy.copy(tasklist)
        number_tasks = len(tasklist)
        if number_tasks == 0:
            totaltime = 0
            return totaltime
        use_threading = number_tasks > 1 and self.num_processes > 1
        starttime = time.process_time()
        task_queue = Queue()
        pbar = _ProgressBar(number_tasks, self.silent)
        pbar.animate(0)
        processed_tasks = []
        n_errors = 0
        threads = []
        try:
            # run while there is still threads, tasks or stuff in the queue
            # to process
            while threads or tasklist or task_queue.qsize():
                # if we aren't using all the processors AND there is still
                # data left to compute, then spawn another thread
                if (len(threads) < self.num_processes) and tasklist:
                    if use_threading:
                        t = Thread(
                            target=_worker, args=tuple([tasklist.pop(0), task_queue])
                        )
                        t.daemon = True
                        t.start()
                        threads.append(t)
                    else:
                        _worker(tasklist.pop(0), task_queue)
                else:
                    # In the case that we have the maximum number
                    # of running threads or we have run out of tasks,
                    # check if any of them are done.
                    for thread in threads:
                        if not thread.is_alive():
                            threads.remove(thread)
                while task_queue.qsize():
                    task = task_queue.get()
                    if task.has_error():
                        n_errors += 1
                    self.summery.task_summery(task)
                    processed_tasks.append(task)
                    pbar.animate(len(processed_tasks), n_errors)

                time.sleep(0.01)
        except KeyboardInterrupt:
            _display("Processing interrupted")
            _subprocess_container.stop_all = True
            # Add a small delay here. It allows the user to press ctrl-c twice
            # to escape this try-catch. This is useful when the code is
            # run in an outer loop which we want to escape as well.
            time.sleep(1)
        totaltime = time.process_time() - starttime
        return totaltime
Example #17
def main():
    start = time.process_time()
    ans = calc_pi(1000000)
    end = time.process_time()

    print("PI = {0:.6f}({1:.2f}%)".format(ans, get_error_rate(ans, math.pi) * 100))
    print(end - start)
Example #18
    def align(self, parameters=None, anchor_pairs=None):

        # sanity checks
        if self.marker.code != 'ladder':
            raise RuntimeError('E: align() must be performed on ladder channel!')

        if parameters:
            self.scan( parameters )         # in case this channel hasn't been scanned

        ladder = self.fsa.panel.get_ladder()

        # prepare ladder qcfunc
        if 'qcfunc' not in ladder:
            ladder['qcfunc'] =  algo.generate_scoring_function(
                                            ladder['strict'], ladder['relax'] )

        start_time = time.process_time()
        result = algo.align_peaks(self, parameters, ladder, anchor_pairs)
        dpresult = result.dpresult
        fsa = self.fsa
        fsa.z = dpresult.z
        fsa.rss = dpresult.rss
        fsa.nladder = len(dpresult.sized_peaks)
        fsa.score = result.score
        fsa.duration = time.process_time() - start_time
        fsa.status = const.assaystatus.aligned
        fsa.ztranspose = dpresult.ztranspose

        #import pprint; pprint.pprint(dpresult.sized_peaks)
        #print(fsa.z)
        cout('O: Score %3.2f | %5.2f | %d/%d | %s | %5.1f | %s' %
            (fsa.score, fsa.rss, fsa.nladder, len(ladder['sizes']), result.method,
            fsa.duration, fsa.filename) )
Example #19
    def log_avg_performance_naive(self, dataarray):
        lag = int(Variable_Holder.max_taumeta / self.taumeta)
        etimenaive = np.zeros(self.num_estimations + 1, dtype=float)

        for k in range(0, self.num_estimations + 1):
            current_time = self.window_size + k * self.shift - 1
            assert (current_time < np.shape(dataarray)[1])
            t0 = process_time()
            data0 = dataarray[:, current_time - self.window_size + 1: (current_time + 1)]
            dataslice0 = []

            for i in range(0, self.num_trajectories):
                dataslice0.append(data0[i, :])
            if k == 0:
                # initialization - we found out that calling the count_matrix_coo2_mult function the first time results
                # in lower performance than for following calls - probably due to caching in the background. To avoid
                # this deviation, we call this function once - for starting the cache procedure.
                estimate_via_sliding_windows(data=dataslice0, num_states=Variable_Holder.num_states, initial=True,
                                             lag=lag)

            C0 = estimate_via_sliding_windows(data=dataslice0, num_states=Variable_Holder.num_states,
                                              initial=True)  # count matrix for whole window
            A0 = _tm(C0)
            etimenaive[k] = process_time() - t0
        log_total_time_naive = Utility.log_value(etimenaive[-1])
        return log_total_time_naive
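The comment in log_avg_performance_naive explains why the first, un-timed call is made: the initial call is slower (caching and similar first-call effects) and would distort the measurement. A generic sketch of the same warm-up-before-timing idea, with work as an illustrative stand-in for the measured function:

import time

def work(n=100000):
    return sum(i * i for i in range(n))

work()  # warm-up call; the result is discarded
t0 = time.process_time()
for _ in range(10):
    work()
print('avg per call: %.6f s' % ((time.process_time() - t0) / 10))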
Example #20
    def evaluate(self, data, labels, sess=None):
        """
        Runs one evaluation against the full epoch of data.
        Return the precision and the number of correct predictions.
        Batch evaluation saves memory and enables this to run on smaller GPUs.

        sess: the session in which the model has been trained.
        op: the Tensor that returns the number of correct predictions.
        data: size N x M
            N: number of signals (samples)
            M: number of vertices (features)
        labels: size N
            N: number of signals (samples)
        """
        t_process, t_wall = time.process_time(), time.time()
        predictions, loss = self.predict(data, labels, sess)
        #print(predictions)
        ncorrects = sum(predictions == labels)
        accuracy = 100 * sklearn.metrics.accuracy_score(labels, predictions)
        f1 = 100 * sklearn.metrics.f1_score(labels, predictions, average='weighted')
        string = 'accuracy: {:.2f} ({:d} / {:d}), f1 (weighted): {:.2f}, loss: {:.2e}'.format(
                accuracy, ncorrects, len(labels), f1, loss)
        if sess is None:
            string += '\ntime: {:.0f}s (wall {:.0f}s)'.format(time.process_time()-t_process, time.time()-t_wall)
        return string, accuracy, f1, loss
Example #21
def get_time_list():
    list_len = []
    time_used_ratio = []

    for n in a:
        list_len.append(n)
        l = list(range(n)) # create a list with numbers 0 ... to n-1
        time_sum_p = 0
        time_sum_foo = 0
        print(n)
        for i in range(50): # run 50 times to get the average time of function p for each list length
            random.shuffle(l) # randomize the list
            timeStamp_p = time.process_time() # get the current cpu time
            p(l, 0, len(l)) # run p function
            timeLapse_p = time.process_time() - timeStamp_p
            time_sum_p = time_sum_p + timeLapse_p
            
        for b in range(25): # run 25 times to get the average time of function foo for each list length
            random.shuffle(l)
            timeStamp_foo = time.process_time() # get the current cpu time
            foo(l,0,len(l)) # run foo function
            timeLapse_foo = time.process_time() - timeStamp_foo
            time_sum_foo = time_sum_foo + timeLapse_foo
            
        time_ave_p = time_sum_p / 50
        time_ave_foo = time_sum_foo/25
        time_ave = time_ave_foo / time_ave_p # get the ratio of the average time of foo to that of p
        time_used_ratio.append(time_ave)
    return [list_len, time_used_ratio]
Example #22
def get_time_array():
    list_len = []
    time_used_ratio = []

    for n in a:
        list_len.append(n)
        l = np.array(range(n)) # create an array with numbers 0 to n-1
        time_sum_p = 0
        time_sum_foo = 0
        print(n)
        for i in range(50): # run 50 times to get the average time of function p
            random.shuffle(l) # randomize the list
            timeStamp_p = time.process_time() # get the current cpu time
            p(l, 0, len(l)) # run p function
            timeLapse_p = time.process_time() - timeStamp_p
            time_sum_p = time_sum_p + timeLapse_p
            
        for b in range(25): # run 25 times to get the average time of function foo
            random.shuffle(l)
            timeStamp_foo = time.process_time() # get the current cpu time
            foo(l,0,len(l)) # run foo function
            timeLapse_foo = time.process_time() - timeStamp_foo
            time_sum_foo = time_sum_foo + timeLapse_foo
            
        time_ave_p = time_sum_p / 50
        time_ave_foo = time_sum_foo/25
        time_ave = time_ave_foo / time_ave_p
        time_used_ratio.append(time_ave)
    return [list_len, time_used_ratio]
Example #23
def transform_source(input_source, *, options=None, query_options=None):
    """Take in the Python source code to a module and return the
    transformed source code and the symbol table.
    """
    tree = P.Parser.p(input_source)
    
    t1 = process_time()
    tree, symtab = transform_ast(tree, options=options,
                                 query_options=query_options)
    t2 = process_time()
    
    source = P.Parser.ts(tree)
    # All good human beings have trailing newlines in their
    # text files.
    source = source + '\n'
    
    symtab.stats['lines'] = get_loc_source(source)
    # L.tree_size() is for IncASTs, but it should also work for
    # Python ASTs. We have to re-parse the source to get rid of
    # our Comment pseudo-nodes.
    tree = P.Parser.p(source)
    symtab.stats['ast_nodes'] = L.tree_size(tree)
    symtab.stats['time'] = t2 - t1
    
    return source, symtab
Example #24
def print_progress(iteration, total, start, prefix = '', suffix = '', decimals = 2, barLength = 100):
    """Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
    """
    filledLength    = int(round(barLength * iteration / float(total)))
    percents        = round(100.00 * (iteration / float(total)), decimals)
    bar             = '#' * filledLength + '-' * (barLength - filledLength)
    global metrics
    global START_TIME
    global speed
    if (time.process_time() - START_TIME) * 1000  > 5:
        START_TIME = time.process_time()
        speed           = round((iteration*8//(time.process_time() - start)//1024), decimals)
        metrics         = 'Kbps'
        if speed > 1024:
            speed = speed//1024
            metrics = 'Mbps'

    Sys.stdout.write('%s [%s] %s%s %s%s %s\r' % (prefix, bar, percents, '%', suffix, speed, metrics)),
    Sys.stdout.flush()
    if iteration == total:
        print("\n")
Example #25
def plotruntime(func, reps, x_arr, singleComponent=False):
    x_y_arr = {}
    for it in range(1,reps):
        for x in x_arr:
            if(singleComponent==True):
                graph = createRandConnectedGraph(x,3*x)
            else:
                graph = createRandomGraph(x,3*x)
            print('x = ', x)
            print("Nodes: %d, vertices: %d" % (x, 3*x))         
            timeStamp = time.process_time() # Start Time
            func(graph) # run func on the generated graph
            timeLapse = time.process_time() - timeStamp
            print('timeLapse = ', timeLapse)
            
            if it==1: # Add first element, append rest 
                x_y_arr[x] = [timeLapse]
            else:
                x_y_arr[x].append(timeLapse)
       
    # Average runtimes for each x        
    for k in x_y_arr:
        x_y_arr[k] = np.mean(x_y_arr[k])

    # Plot using matplotlib.pyplot
    plt.xlabel('n')
    plt.ylabel('time (in seconds)')
    plt.title('Run times for different n\'s ')
    plt.plot(list(x_y_arr.keys()), list(x_y_arr.values()), 'ro')
    plt.show()
    return x_y_arr    
Example #26
    def _t(self, slot, fn):
        start = time.process_time()
        n = len(fn())
        d = time.process_time() - start

        setattr(self, 'len_' + slot, n)
        setattr(self, 'time_' + slot, d)
Example #27
def test_burn():
    with stats.record_burn('foo', url='http://example.com/'):
        t0 = time.process_time()
        while time.process_time() < t0 + 0.001:
            pass

    assert stats.burners['foo']['count'] == 1
    assert stats.burners['foo']['time'] > 0 and stats.burners['foo']['time'] < 0.3
    assert 'list' not in stats.burners['foo']  # first burn never goes on the list

    with stats.record_burn('foo', url='http://example.com/'):
        t0 = time.process_time()
        while time.process_time() < t0 + 0.2:
            pass

    assert stats.burners['foo']['count'] == 2
    assert stats.burners['foo']['time'] > 0 and stats.burners['foo']['time'] < 0.3
    assert len(stats.burners['foo']['list']) == 1

    stats.update_cpu_burn('foo', 3, 3.0, set())
    assert stats.burners['foo']['count'] == 5
    assert stats.burners['foo']['time'] > 3.0 and stats.burners['foo']['time'] < 3.3
    assert len(stats.burners['foo']['list']) == 1

    stats.report()
Example #28
    # Create an empty array to store our baseline measurements in
    visibility_data = visibility_grid

    real_component = interpolate.RegularGridInterpolator(
        [u_bin_centers, v_bin_centers], numpy.real(visibility_data))
    imag_component = interpolate.RegularGridInterpolator(
        [u_bin_centers, v_bin_centers], numpy.imag(visibility_data))

    visibilities = real_component(
        baseline_coordinates.T) + 1j * imag_component(baseline_coordinates.T)

    return visibilities


if __name__ == "__main__":
    start = time.process_time()
    parser = argparse.ArgumentParser(
        description='Broken Tile Simulation Set Up')
    parser.add_argument('-beam', action='store', default="gaussian", type=str)
    parser.add_argument('-broken_dipole', action='store', default=1, type=int)
    parser.add_argument('-broken_tile', action='store', default=1, type=int)
    parser.add_argument('-number_channels',
                        action='store',
                        default=100,
                        type=int)
    parser.add_argument('-calibrate', action='store_true', default=True)
    parser.add_argument('-verbose', action='store_true', default=True)
    args = parser.parse_args()
    main(args.beam, args.broken_dipole, args.broken_tile, args.number_channels,
         args.calibrate, args.verbose)
    end = time.process_time()
Example #29
    filename = os.path.abspath(__file__).split('\\')[-1].split('.')[0]
    timeer0 = datetime.datetime.fromtimestamp(time.time()).strftime('%m_%d=%H')
    datapath = r'F:\vnpy_my_gitee\new_company\hsi_data_1min\HSI2011-2019_12.csv'
    dir_path = r'F:\task\恒生股指期货\numba_策略开发\numba_突破max01_突破_空策略'
    strat_time = np.array([1, 9, 20])
    end_time = np.array([27, 16, 20])
    # 1. Test single-process first (cpu == 1)
    a = 1
    b = 10
    c = 10

    if True == a:
        df_time_list = [['2019-10-01 09:15:00', '2019-12-26 16:25:00']]
        s_time, e_time = df_time_list[0]
        df = get_loc_hsicsv(s_time, e_time, datapath)  # load local data
        time0 = time.process_time()
        df000 = df.copy()
        # df['bh']=talib.MFI(df['high'], df['low'], df['close'], df['volume'],120)
        # df['bh'] = talib.MAMA(df['close'], 30)
        # df['ma']= talib.EMA(df['close'],30)
        # N = 10
        # X = 2 * talib.WMA(df['close'], int(N / 2)) - talib.WMA(df['close'], N)
        # df['hma'] = talib.WMA(X, int(N**0.5))

        df = zsy_bollin_01(df000, para=[90, 2.5])
        print(df.tail())
        dfres, res0 = cal_huice_hsi(df, canshu=[90, 2.5])
        print(res0)
        # df['p'] = (talib.RSI(df['close'], 14))/100
        # df['mx'] = talib.SMA(df['close'], 20)
        # df['md'] = talib.SMA(df['close'], 60 )
Example #30
    'USDTRY', 'USDZAR', 'ZARJPY'
]

interval = "1min"
today = date.today()

source_latest_open_close = pd.read_pickle(
    "data/open_closes_source_latest_2019-10-01.pkl").sort_index()
closes = source_latest_open_close[[
    close for close in source_latest_open_close.columns.tolist()
    if "close" in close
]]

feats = []

t1_start = process_time()

# print(mom1d['2019-09-19 06:38:00':'2019-09-19 06:47:00'], non_parall_mom1d['2019-09-19 06:38:00':'2019-09-19 06:47:00'])

## TODO momentum and change of momentum
'''
mom1d = closes.swifter.apply(featGen.momentum, axis=0, args=(1,'D')).fillna(method='ffill').add_prefix('mom1d_')
mom5d = closes.swifter.apply(featGen.momentum, axis=0, args=(5,'D')).fillna(method='ffill').add_prefix('mom5d_')
mom10d = closes.swifter.apply(featGen.momentum, axis=0, args=(10,'D')).fillna(method='ffill').add_prefix('mom10d_')

mom5h = closes.swifter.apply(featGen.momentum, axis=0, args=(5,'H')).fillna(method='ffill').add_prefix('mom5h_')
mom1h = closes.swifter.apply(featGen.momentum, axis=0, args=(1,'H')).fillna(method='ffill').add_prefix('mom1h_')
mom10h = closes.swifter.apply(featGen.momentum, axis=0, args=(10,'H')).fillna(method='ffill').add_prefix('mom10h_')

mom30min = closes.swifter.apply(featGen.momentum, axis=0, args=(30,'min')).fillna(method='ffill').add_prefix('mom30min_')
mom15min = closes.swifter.apply(featGen.momentum, axis=0, args=(15,'min')).fillna(method='ffill').add_prefix('mom15min_')
Example #31
def train_Keras(train_X,
                train_y,
                test_X,
                test_y,
                kwargs,
                e2efs_class=None,
                n_features=None,
                epochs=150,
                fine_tuning=True):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('mu :', kwargs['mu'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    svc_model = LinearSVC(nfeatures=norm_train_X.shape[1:], **kwargs)
    svc_model.create_keras_model(nclasses=num_classes)

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]

    fs_callbacks = []

    if e2efs_class is not None:
        classifier = svc_model.model
        e2efs_layer = e2efs_class(n_features,
                                  input_shape=norm_train_X.shape[1:])
        model = e2efs_layer.add_to_model(classifier,
                                         input_shape=norm_train_X.shape[1:])
        fs_callbacks.append(clbks.E2EFSCallback(units=10, verbose=verbose))
    else:
        model = svc_model.model
        e2efs_layer = None

    optimizer = optimizer_class(e2efs_layer, lr=initial_lr)

    model.compile(loss=LinearSVC.loss_function(loss_function, class_weight),
                  optimizer=optimizer,
                  metrics=[LinearSVC.accuracy])

    if e2efs_class is not None:
        model.fs_layer = e2efs_layer
        model.heatmap = e2efs_layer.moving_heatmap

        start_time = time.process_time()
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=200000,
                  callbacks=fs_callbacks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)
        model.fs_time = time.process_time() - start_time

    if fine_tuning:
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=epochs,
                  callbacks=model_clbks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)

    model.normalization = normalization

    return model
Example #32
#############################################
# Data retrieval without using SQLite index #
# > Sequential version (single process) <   #
#############################################

PRINT_PROGRESS = True
OUTPUT_FILE = None # Set to None to print to console


if __name__ == "__main__":
	# Parse parameters
	if len(sys.argv) < 2:
		print(f"Error: Missing search parameter!")
		sys.exit(1)
	query_text = sys.argv[1]

	time_start = time.process_time() # Start timer

	# Preprocess query
	query_words, _, _ = preprocessing.preprocess_text(query_text)
	query_words = searching.remove_duplicates(query_words)
	
	search_results = []

	# Start search
	for site in config.INPUT_SITES:
		site_path = config.INPUT_PATH + "/" + site
		# Loop over all pages
		padding = (max([len(x) for x in config.INPUT_SITES]) - len(site)) * " " # Add spaces to align progress bars
		for page in tqdm(os.listdir(site_path), desc=f"Searching {site}{padding}", unit="pages", disable=not PRINT_PROGRESS):
			# Only process html files with the same name as site
			if not (page.startswith(site) and page.endswith(".html")):
Example #33
def cal_huice_hsi(df, canshu):
    '''

    :param df: input data must contain 'candle_begin_time', 'open', 'high', 'low', 'close', 'volume', 'signal'
    :param strat_time:
    :param end_time:
    :param canshu:
    :return:
    '''
    a = time.process_time()

    from datetime import timedelta
    df = zsy_bollin_01(df, canshu)

    df['candle_begin_time'] = (df['candle_begin_time'] -
                               np.datetime64(0, 's')) / timedelta(seconds=1)
    df['days'] = pd.to_datetime(
        df["candle_begin_time"],
        unit='s').apply(lambda x: float(x.to_pydatetime().day))
    df['huors'] = pd.to_datetime(
        df["candle_begin_time"],
        unit='s').apply(lambda x: float(x.to_pydatetime().hour))
    df['minutes'] = pd.to_datetime(
        df["candle_begin_time"],
        unit='s').apply(lambda x: float(x.to_pydatetime().minute))
    # === signal / position bookkeeping columns
    if 'signal' not in df.keys():
        df['signal'] = 0
        df['pos'] = 0
    else:
        df['pos'] = df['signal'].shift()
        df['pos'].fillna(method='ffill', inplace=True)
        df['pos'].fillna(value=0, inplace=True)
        df['opne_price'] = np.nan
        df['per_lr'] = np.nan
        df['sl'] = np.nan
    # === convert to np.array
    df0cols = [
        'candle_begin_time', 'open', 'high', 'low', 'close', 'volume', 'days',
        'huors', 'minutes'
    ]
    df1cols = [
        'candle_begin_time', 'signal', 'pos', 'opne_price', 'per_lr', 'sl'
    ]
    df2cols = [i for i in df.keys() if i in df0cols + df1cols]
    df0 = df[df0cols].values
    df1 = df[df1cols].values
    df2 = df[df2cols].values
    b = time.process_time()

    df0, df1, df2, res0 = cal_signal_hsi(df0, df1, df2, np.array(canshu))
    print('core time:', time.process_time() - b, 's')

    # convert back to DataFrames
    df00 = pd.DataFrame(df0, columns=df0cols)
    df11 = pd.DataFrame(df1, columns=df1cols)
    df22 = pd.DataFrame(df2, columns=df2cols)
    # merge
    df11_ = pd.merge(df00, df11, on="candle_begin_time", suffixes=('_0', '_1'))
    dfres = pd.merge(df11_, df22, on="candle_begin_time", suffixes=('', '_2'))
    dfres["candle_begin_time"] = pd.to_datetime(dfres["candle_begin_time"],
                                                unit='s')
    dfres.sort_values(by='candle_begin_time', inplace=True)
    cols = [
        '最后收益', '最大收益', '模拟最大回撤', '赢撤率', '夏普率', '平均收益', '开仓次数', '胜率', '盈亏比'
    ] + [f'参数{i}' for i in range(1,
                                 len(canshu) + 1)]
    print('running time:', time.process_time() - a, 's', res0[:3])

    return dfres, res0
Example #34
def duojincheng_backtesting(df_input,
                            zong_can,
                            strat_time,
                            end_time,
                            cpu_nums=3,
                            jiexi=False):
    df = df_input.copy()
    if cpu_nums > cpu_count() - 1: cpu_nums = cpu_count() - 1
    huice_df = []

    def tianjia(res):
        huice_df.append(res[-1])
        # print(res[-1])

    pass
    # === raw data preprocessing
    df['candle_begin_time'] = (df['candle_begin_time'] -
                               np.datetime64(0, 's')) / timedelta(seconds=1)
    df['days'] = pd.to_datetime(
        df["candle_begin_time"],
        unit='s').apply(lambda x: float(x.to_pydatetime().day))
    df['huors'] = pd.to_datetime(
        df["candle_begin_time"],
        unit='s').apply(lambda x: float(x.to_pydatetime().hour))
    df['minutes'] = pd.to_datetime(
        df["candle_begin_time"],
        unit='s').apply(lambda x: float(x.to_pydatetime().minute))
    # === signal / position bookkeeping columns
    df['signal'] = np.nan
    df['pos'] = 0
    df['opne_price'] = np.nan
    df['per_lr'] = np.nan
    df['sl'] = np.nan
    # === indicator columns
    df['止损价'] = np.nan
    df['止盈价'] = np.nan
    df['日内最低价'] = np.nan
    df['日内最高价'] = np.nan
    df['开仓线'] = np.nan
    df['小均线'] = np.nan
    df['大均线'] = np.nan

    df = df[(df['huors'] >= 2) & (df['huors'] <= 16) &
            (df['days'] <= end_time[0])]
    pass

    # multi-process backtest
    if cpu_nums > 1:
        p = Pool(processes=cpu_nums)
        for j in range(0, len(zong_can), cpu_nums):
            for i in range(cpu_nums):
                if j + i <= len(zong_can) - 1:
                    canshu0 = zong_can[j + i]
                    p.apply_async(
                        cal_signal,
                        args=(
                            df,
                            strat_time,
                            end_time,
                            np.array(canshu0),
                        ),
                        callback=tianjia,
                    )
                else:
                    break
        p.close()
        p.join()
        print('process pool joined')
        # collect the multi-process backtest results
        cols = [
            '最后收益', '最大收益', '模拟最大回撤', '赢撤率', '夏普率', '平均收益', '开仓次数', '胜率', '盈亏比'
        ] + [f'参数{i}' for i in range(1,
                                     len(zong_can[0]) + 1)]
        resdf = pd.DataFrame(huice_df, columns=cols)
        resdf = resdf[cols]
        resdf.sort_values(by='最后收益', inplace=True)
        print(resdf.iloc[-10:])
        print('= parameter backtest finished, thank you.')
        return resdf

    # single-process test
    else:
        for cs0 in zong_can:
            a = time.process_time()
            df0, df1, df2, cols, res0 = cal_signal(df, strat_time, end_time,
                                                   np.array(cs0))
            print('running time:', time.process_time() - a, 's')
            tianjia(res=res0)
        if jiexi:
            return

        df0cols, df1cols, df2cols = cols
        # convert back to DataFrames
        df00 = pd.DataFrame(df0, columns=df0cols)
        df11 = pd.DataFrame(df1, columns=df1cols)
        df22 = pd.DataFrame(df2, columns=df2cols)
        # merge
        df11_ = pd.merge(df00,
                         df11,
                         on="candle_begin_time",
                         suffixes=('_0', '_1'))
        dfres = pd.merge(df11_,
                         df22,
                         on="candle_begin_time",
                         suffixes=('', '_2'))
        dfres["candle_begin_time"] = pd.to_datetime(dfres["candle_begin_time"],
                                                    unit='s')
        dfres.sort_values(by='candle_begin_time', inplace=True)
        cols = [
            '最后收益', '最大收益', '模拟最大回撤', '赢撤率', '夏普率', '平均收益', '开仓次数', '胜率', '盈亏比'
        ] + [f'参数{i}' for i in range(1,
                                     len(zong_can[0]) + 1)]
        res0 = pd.DataFrame([res0], columns=cols)
        print(dfres.iloc[-10:])

        return dfres, res0
Example #35
 def inner(*args, **kwargs):
     start = time.process_time()
     result = fn(*args, **kwargs)
     print(time.process_time() - start)
     return result
Example #36
		if os.path.isdir(pathto):
			pass
		else:
			sg.PopupError('No path or the one selected is invalid. Run the program again.', pathto)
			sys.exit()
	
	elif values[2] == True:
			extracttype = 'zip'
			pathto = values[3]
			if pathto.endswith('.zip'):
				pass
			else:
				sg.PopupError('No file or no .zip extension selected. Run the program again.', pathto)
				sys.exit()
	
	start = process_time()
	
	
	tosearch = {'wellbeing': '*/com.google.android.apps.wellbeing/databases/*',
				'wellbeingaccount':'*/com.google.android.apps.wellbeing/files/AccountData.pb',
				'usagestats':'*/usagestats/*',
				'recentactivity':'*/system_ce/*'}
	'''
	tosearch = {'lastbuild': '*LastBuildInfo.plist',
				'interactionc':'*interactionC.db'}
	'''
			
	os.makedirs(reportfolderbase)
	os.makedirs(reportfolderbase+'Script Logs')
	logfunc('Processing started. Please wait. This may take a few minutes...')
Example #37
 def process_queue(self):
     start = time.process_time()
     while self.queue and time.process_time() - start < 1 / 60.0:
         self.dequeue()
Example #38
def endTimer(timeit, msg):
    global startTime
    endTime = time.process_time()
    if (timeit):
        write(msg, endTime - startTime, file=sys.stderr)
        startTime = None
Example #39
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
    starttime = process_time()

    scan_type = req.route_params.get('scan')
    view = req.route_params.get('view')
    name = req.route_params.get('name')

    """ Check to ensure ALL parameters were passed in the URI.
    If you mark the route parameters in function.json as mandatory,
     the Azure Function worker supplies a 404 if you do not supply all
     three routes in the URI. I made routes optional, this way we
     can handle errors gracefully """
    if scan_type is None or view is None or name is None:
        error = {"Message": ("Please pass three parameters in the URI:"
                             " valid scan type: policy or full, "
                             "valid DNS view: internal or external, "
                             "and a valid DNS domain name")
                 }
        return json.dumps(error)

    # Check to ensure a valid scan type was passed
    scan_type = scan_type.lower()
    if scan_type not in valid_scan_types:
        error = {"Scan Type:": f'{scan_type}',
                 "Message": ("Please pass a valid scan"
                             " type: policy or full")
                 }
        return json.dumps(error)

    # Check to ensure a valid DNS view was passed
    view = view.lower()
    if view not in dnsview:
        error = {"View:": f'{view}',
                 "Message": ("Please pass a valid DNS view"
                             ": internal or external")
                 }
        return json.dumps(error)

    if dnsview.get(view) == '0.0.0.0':
        error = {"View:": f'{view}',
                 "Message": ("Please specify a valid DNS server"
                             " in config.ini")
                 }
        return json.dumps(error)

    # Parse the name parameter to ensure it is a valid DNS name
    # and does not contain http(s)
    try:
        name = shared_dns.parse_name(name)
    except Exception:
        error = results.set_error(f'{name}', "Not a valid formatted DNS name")
        return json.dumps(error)

    """ Try to resolve the DNS name to an IP to ensure it exists.
     We use the IP in the scan so that we can record which one we tested
     which can be useful. """
    try:
        ip = shared_dns.resolve_dns(dnsview.get(view), name)
    except Exception as err:
        error = results.set_error(f'{name}',
                                  str(err))
        return json.dumps(error)

    # Run the scan
    scanjob = scanner.scan(name, ip, view, scan_type)
    elapsedtime = process_time() - starttime
    logging.info(f'{name} processed for {elapsedtime}')
    return json.dumps(scanjob)
Example #40
NAtom=108            #Number of atoms
# Set density and temperatures (gm/cm^3 and K)
Dens=.1
Temp=150.

#Set number of equilibration and simulation time steps
MaxEqb=100
MaxStep=1000

# Convert to dimensionless units
Density=Dens/1.68247         #Density  (1.0=1.68247 gm/cm^3)
Temperature=Temp/119.4      #Temperature (1.0=119.4K )


###### Start timing ######
start_time=time.process_time()

###### Set random seed ######
random.seed(1)

###### Setup Simulations #######       
asystem=system(NAtom, Density, Temperature, MaxEqb, MaxStep) #Create system object
asystem.setfcc()          #Fill with NAtom atoms on FCC lattice
asystem.setvel()          #Set atom velocities
asystem.printparam()      #Print simulation parameters
asystem.evalforce()       #Initialize force calculation
asystem.scaleforces()     #Scale forces for initial step
asystem.printproplabel()  #Print header for property output

###### Run Equilibration #######       
for step in range(1,asystem.MaxEqb+1):
Example #41
    GREEN.fit(greenX, greenY)
    fitting_cost = 0  # should use score() from the regression
    # print RED_a_, RED_b
    # print BLUE_a_, BLUE_b
    # print GREEN_a_, GREEN_b
    return (
        (RED.coef_, RED.intercept_),
        (BLUE.coef_, BLUE.intercept_),
        (GREEN.coef_, GREEN.intercept_),
        fitting_cost,
    )


if __name__ == "__main__":
    img_filename = "test2.jpg"
    if len(sys.argv) > 1:
        img_filename = sys.argv[1]
        logger.info(img_filename)
    cv_img = cv2.imread(img_filename)
    t1 = time.process_time()
    testdata = getimgdatapts(cv_img)
    t2 = time.process_time()
    logger.info("Time taken:")
    logger.info((t2 - t1))

    trained = runKMeans(testdata)
    mapping = identifyColors(trained[0], CENTERS)
    getparameters(mapping, trained[0], CENTERS)

    # from anti_instagram import AntiInstagram Why is this here?
Example #42
def ExportSingleFbxAction(originalScene, dirpath, filename, obj, targetAction,
                          actionType):
    #Export a single action like an animation or pose

    scene = bpy.context.scene
    addon_prefs = bpy.context.preferences.addons[
        "blender-for-unrealengine"].preferences

    filename = ValidFilenameForUnreal(filename)
    curr_time = time.process_time()
    if obj.animation_data is None:
        obj.animation_data_create()
    userAction = obj.animation_data.action  #Save current action
    userAction_extrapolation = obj.animation_data.action_extrapolation
    userAction_blend_type = obj.animation_data.action_blend_type
    userAction_influence = obj.animation_data.action_influence

    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    SelectParentAndDesiredChilds(obj)
    ApplyExportTransform(obj)

    ResetArmaturePose(obj)
    if (scene.is_nla_tweakmode == True):
        obj.animation_data.use_tweak_mode = False  #animation_data.action is ReadOnly with tweakmode in 2.8
    obj.animation_data.action = targetAction  #Apply desired action and reset NLA
    obj.animation_data.action_extrapolation = 'HOLD'
    obj.animation_data.action_blend_type = 'REPLACE'
    obj.animation_data.action_influence = 1
    scene.frame_start = GetDesiredActionStartEndTime(obj, targetAction)[0]
    scene.frame_end = GetDesiredActionStartEndTime(obj, targetAction)[1]

    absdirpath = bpy.path.abspath(dirpath)
    VerifiDirs(absdirpath)
    fullpath = os.path.join(absdirpath, filename)

    #Set rename temporarily the Armature as "Armature"
    oldArmatureName = RenameArmatureAsExportName(obj)

    bpy.ops.export_scene.fbx(
        filepath=fullpath,
        check_existing=False,
        use_selection=True,
        global_scale=GetObjExportScale(obj),
        object_types={'ARMATURE', 'EMPTY', 'MESH'},
        use_custom_props=addon_prefs.exportWithCustomProps,
        add_leaf_bones=False,
        use_armature_deform_only=obj.exportDeformOnly,
        bake_anim=True,
        bake_anim_use_nla_strips=False,
        bake_anim_use_all_actions=False,
        bake_anim_force_startend_keying=True,
        bake_anim_step=GetAnimSample(obj),
        bake_anim_simplify_factor=obj.SimplifyAnimForExport,
        use_metadata=addon_prefs.exportWithMetaData,
        primary_bone_axis=obj.exportPrimaryBaneAxis,
        secondary_bone_axis=obj.exporSecondaryBoneAxis,
        axis_forward=obj.exportAxisForward,
        axis_up=obj.exportAxisUp,
        bake_space_transform=False)

    #Reset armature name
    ResetArmatureName(
        obj,
        oldArmatureName,
    )

    ResetArmaturePose(obj)
    obj.animation_data.action = userAction  #Resets previous action and NLA
    obj.animation_data.action_extrapolation = userAction_extrapolation
    obj.animation_data.action_blend_type = userAction_blend_type
    obj.animation_data.action_influence = userAction_influence
    exportTime = time.process_time() - curr_time

    MyAsset = originalScene.UnrealExportedAssetsList.add()
    MyAsset.assetName = filename
    MyAsset.assetType = actionType
    MyAsset.exportPath = absdirpath
    MyAsset.exportTime = exportTime
    MyAsset.object = obj
    return MyAsset
Example #43
def startTimer(timeit):
    global startTime
    startTime = time.process_time()
Example #44
def ExportSingleFbxMesh(originalScene, dirpath, filename, obj):
    #Export a single Mesh

    scene = bpy.context.scene
    addon_prefs = bpy.context.preferences.addons[
        "blender-for-unrealengine"].preferences

    filename = ValidFilenameForUnreal(filename)
    curr_time = time.process_time()

    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    SelectParentAndDesiredChilds(obj)
    bpy.ops.object.duplicate()
    currentObjName = []
    for objScene in scene.objects:
        currentObjName.append(objScene.name)

    bpy.ops.object.duplicates_make_real(use_base_parent=True,
                                        use_hierarchy=True)

    for objScene in scene.objects:
        if objScene.name not in currentObjName:
            objScene.select_set(True)
            pass

    for objScene in bpy.context.selected_objects:
        if objScene.data is not None:
            objScene.data = objScene.data.copy()

    ApplyNeededModifierToSelect()
    UpdateNameHierarchy(
        GetAllCollisionAndSocketsObj(bpy.context.selected_objects))
    active = bpy.context.view_layer.objects.active

    ApplyExportTransform(active)

    absdirpath = bpy.path.abspath(dirpath)
    VerifiDirs(absdirpath)
    fullpath = os.path.join(absdirpath, filename)
    meshType = GetAssetType(active)

    #Set socket scale for Unreal
    for socket in GetSocketDesiredChild(active):
        socket.delta_scale *= 0.01 * addon_prefs.StaticSocketsImportedSize

    #Temporarily rename the Armature to "Armature" for export
    if meshType == "SkeletalMesh":
        oldArmatureName = RenameArmatureAsExportName(active)

    object_types = {'ARMATURE', 'CAMERA', 'EMPTY', 'LIGHT', 'MESH',
                    'OTHER'}  #Default

    if meshType == "StaticMesh":
        #Dont export ARMATURE with static mesh
        object_types = {'CAMERA', 'EMPTY', 'LIGHT', 'MESH', 'OTHER'}
    if meshType == "SkeletalMesh":
        #Dont export EMPTY with Skeletal mesh
        object_types = {
            'ARMATURE', 'EMPTY', 'CAMERA', 'LIGHT', 'MESH', 'OTHER'
        }

    bpy.ops.export_scene.fbx(
        filepath=fullpath,
        check_existing=False,
        use_selection=True,
        global_scale=GetObjExportScale(active),
        object_types=object_types,
        use_custom_props=addon_prefs.exportWithCustomProps,
        mesh_smooth_type="FACE",
        add_leaf_bones=False,
        use_armature_deform_only=active.exportDeformOnly,
        bake_anim=False,
        use_metadata=addon_prefs.exportWithMetaData,
        primary_bone_axis=active.exportPrimaryBaneAxis,
        secondary_bone_axis=active.exporSecondaryBoneAxis,
        axis_forward=active.exportAxisForward,
        axis_up=active.exportAxisUp,
        bake_space_transform=False)

    #Reset armature name
    if meshType == "SkeletalMesh":
        ResetArmatureName(active, oldArmatureName)

    bpy.ops.object.delete()

    exportTime = time.process_time() - curr_time

    MyAsset = originalScene.UnrealExportedAssetsList.add()
    MyAsset.assetName = filename
    MyAsset.assetType = meshType
    MyAsset.exportPath = absdirpath
    MyAsset.exportTime = exportTime
    MyAsset.object = obj
    return MyAsset
Exemplo n.º 45
0
    def _phiStar_rbf(self, dt, smooth=0.9):

        import numpy as np
        from scipy.spatial import cKDTree
        from scipy.interpolate import Rbf
        import time

        if self._mswarm is None:
            self._build_phiStar_swarm(ratio=smooth)

        walltime = time.process_time()

        mesh = self.phiField.mesh
        phiStar = mesh.add_variable(dataType="double", nodeDofCount=1)
        phiNorm = mesh.add_variable(dataType="double", nodeDofCount=1)
        mswarm_phiStar = self._mswarm_phiStar
        mswarm = self._mswarm

        # ## This can't be cached
        # if mesh.dim == 2:
        #     mesh_interpolator = Rbf(mesh.data[:,0],mesh.data[:,1], self.phiField.data, smooth=0.0, function='thin_plate' )
        # else:
        #     mesh_interpolator = Rbf(mesh.data[:,0],mesh.data[:,1], mesh.data[:,2], self.phiField.data, smooth=0.0, function='thin_plate' )

        # This really only needs to be built if the mesh changes
        mesh_tree = cKDTree( mesh.data )

        self._mswarm_advector.integrate(-dt, update_owners=True)

        # if mesh.dim == 2:
        #     mswarm_phiStar.data[:,0] = mesh_interpolator(mswarm.particleCoordinates.data[:,0],
        #                                                  mswarm.particleCoordinates.data[:,1])
        # else:
        #     mswarm_phiStar.data[:,0] = mesh_interpolator(mswarm.particleCoordinates.data[:,0],
        #                                                  mswarm.particleCoordinates.data[:,1],
        #                                                  mswarm.particleCoordinates.data[:,2] )
        #

        # EBE version - global RBF is impractical in nearly every case
        # We need to know the element size and mesh dimension to do this interpolation
        # correctly ... first, the 3D, Q1 version ...

        if "Q1" in mesh.elementType:
            stencil_size = 6**mesh.dim
        elif "Q2" in mesh.elementType:
            stencil_size = 7**mesh.dim
        else:  # No idea
            stencil_size = 7**mesh.dim

        # I think this can be eliminated at some stage ...
        local_nId = -1 * np.ones(mesh.nodesGlobal, dtype=int)
        for i, gId in enumerate(mesh.data_nodegId):
            local_nId[gId] = i

        for el in range(0, mesh.elementsLocal):
            # if el%1000 == 0:
            #     print("{}: Element: {}".format(uw.mpi.rank, el), flush=True)
            element_centroid = mesh.data[local_nId[mesh.data_elementNodes[el]]].mean(axis=0)

            d, local_nodes = mesh_tree.query(element_centroid, k=stencil_size)
            particles = np.where(mswarm.owningCell.data == el)[0]

            if mesh.dim == 2:
                mesh_interpolator = Rbf(mesh.data[local_nodes, 0],
                                        mesh.data[local_nodes, 1],
                                        self.phiField.data[local_nodes], smooth=0.0, function='thin_plate' )
                locations_x, locations_y = mswarm.particleCoordinates.data[particles].T
                mswarm_phiStar.data[particles, 0] = mesh_interpolator(locations_x, locations_y)

            else:
                mesh_interpolator = Rbf(mesh.data[local_nodes, 0],
                                        mesh.data[local_nodes, 1],
                                        mesh.data[local_nodes, 2],
                                        self.phiField.data[local_nodes], smooth=0.0, function='thin_plate' )

                locations_x, locations_y, locations_z = mswarm.particleCoordinates.data[particles].T
                mswarm_phiStar.data[particles, 0] = mesh_interpolator(locations_x, locations_y, locations_z)

        # Restore
        self._reset_phiStar_swarm()

        phiStar.data[:] = 0.0
        phiNorm.data[:] = 0.0

        # Surely this can be optimised (maybe the kdTree (cached) would be quicker / less storage ?)
        for i, gnode in enumerate(self._mswarm_map.data[:, 0]):
            node = np.where(mesh.data_nodegId == gnode)[0]
            phiStar.data[node] += mswarm_phiStar.data[i]
            phiNorm.data[node] += 1.0

        if uw.mpi.size > 1:
            mswarm.shadow_particles_fetch()
            for i, gnode in enumerate(self._mswarm_map.data_shadow[:, 0]):
                node = np.where(mesh.data_nodegId == gnode)[0]
                phiStar.data[node] += mswarm_phiStar.data_shadow[i, 0]
                phiNorm.data[node] += 1.0

        phiStar.data[np.where(phiNorm.data > 0.0)] /= phiNorm.data[np.where(phiNorm.data > 0.0)]

        self._phiStar_dirichlet_conditions(phiStar)
        #
        # print("{} - RBF interpolation ... {}s".format(uw.mpi.rank, time.process_time()-walltime), flush=True )
        #

        return phiStar
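
# The element-by-element interpolation above builds a small RBF from a k-d tree
# stencil around each element rather than a global RBF. A minimal, self-contained
# sketch of that idea on a toy 2D grid (an illustration only, not the original
# underworld method) might look like this:

import numpy as np
from scipy.spatial import cKDTree
from scipy.interpolate import Rbf

# Toy mesh nodes and a nodal field to interpolate
xx, yy = np.meshgrid(np.linspace(0, 1, 11), np.linspace(0, 1, 11))
nodes = np.column_stack([xx.ravel(), yy.ravel()])
field = np.sin(np.pi * nodes[:, 0]) * np.cos(np.pi * nodes[:, 1])

tree = cKDTree(nodes)
stencil_size = 36  # roughly 6**dim, as for the Q1 case above

# Interpolate at a few sample points using only the nearest stencil of nodes,
# instead of an impractically large global RBF
samples = np.array([[0.23, 0.41], [0.77, 0.15]])
for p in samples:
    _, local = tree.query(p, k=stencil_size)
    rbf = Rbf(nodes[local, 0], nodes[local, 1], field[local],
              smooth=0.0, function='thin_plate')
    print(p, float(rbf(p[0], p[1])))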
Exemplo n.º 46
0
    def number_bin(self, bin_feature, max_num_bin=None, no_mono_feature=None):
        """
        有序数值变量分箱组合
        :param bin_feature:list, 参与分箱的变量
        :param max_num_bin: int 最大分箱数
        :param no_mono_feature list 不参与单调检验的变量
        :return: bin_dict:dict, var_iv:dict
        """
        t0 = time.process_time()
        bin_dict, var_iv = {}, {}
        df = self._df
        bin_feature = IC.check_list(bin_feature)
        df[bin_feature] = df[bin_feature].astype("float")  # avoid mistakenly treating categorical variables as continuous

        if max_num_bin is None:
            max_num_bin = self._max_num_bin
        else:
            max_num_bin = IC.check_int(max_num_bin)
        if no_mono_feature is None:
            no_mono_feature = []
        else:
            no_mono_feature = IC.check_list(no_mono_feature)

        # Start binning
        for col in bin_feature:
            try:
                if col not in no_mono_feature:
                    cutOffPoints = woebin(
                        dt=df,
                        y=self._target,
                        x=col,
                        breaks_list=self._breaks_list,
                        special_values=self._special_values,
                        min_perc_fine_bin=self._min_per_fine_bin,
                        min_perc_coarse_bin=self._min_per_coarse_bin,
                        stop_limit=self._stop_limit,
                        max_num_bin=max_num_bin,
                        method=self._method)[col]["breaks"].tolist()
                    cutOffPoints = [
                        float(i) for i in cutOffPoints
                        if str(i) not in ['inf', '-inf']
                    ]  # cut-off points

                    # Merged binning after the monotonicity check
                    mono_cutOffPoints = monotonous_bin(
                        df=self._df,
                        col=col,
                        cutOffPoints=cutOffPoints,
                        target=self._target,
                        special_values=self._special_values)
                else:
                    mono_cutOffPoints = None

                # Final binning scheme
                bin_dict[col] = woebin(
                    dt=self._df,
                    y=self._target,
                    x=col,
                    breaks_list=mono_cutOffPoints,
                    special_values=self._special_values,
                    min_perc_fine_bin=self._min_per_fine_bin,
                    min_perc_coarse_bin=self._min_per_coarse_bin,
                    stop_limit=self._stop_limit,
                    max_num_bin=max_num_bin,
                    method=self._method)[col]
                # Save IV
                var_iv[col] = bin_dict[col]["total_iv"].unique()[0]

            except Exception:
                print("Variable {} failed the monotonicity check".format(col))
                # Bin again without the monotonicity constraint
                bin_dict[col] = woebin(
                    dt=self._df,
                    y=self._target,
                    x=col,
                    breaks_list=self._breaks_list,
                    special_values=self._special_values,
                    min_perc_fine_bin=self._min_per_fine_bin,
                    min_perc_coarse_bin=self._min_per_coarse_bin,
                    stop_limit=self._stop_limit,
                    max_num_bin=max_num_bin,
                    method=self._method)[col]
                print("变量{}的BadRate为{}".format(
                    col, bin_dict[col]['badprob'].tolist()))
                # 保存IV
                var_iv[col] = bin_dict[col]["total_iv"].unique()[0]

        print("处理完{}个有序数值变量,耗时:{}秒".format(
            len(bin_feature), (time.process_time() - t0) * 100 / 60))
        return bin_dict, var_iv
def ExportSingleFbxCamera(originalScene, dirpath, filename, obj):
    #Export a single camera

    scene = bpy.context.scene
    addon_prefs = bpy.context.preferences.addons[
        "blender-for-unrealengine"].preferences

    filename = ValidFilename(filename)
    if obj.type != 'CAMERA':
        return
    curr_time = time.process_time()
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_all(action='DESELECT')

    #Select and rescale camera for export
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj
    obj.delta_scale *= 0.01
    if obj.animation_data is not None:
        action = obj.animation_data.action
        scene.frame_start = GetDesiredActionStartEndTime(obj, action)[0]
        scene.frame_end = GetDesiredActionStartEndTime(obj, action)[1]

    absdirpath = bpy.path.abspath(dirpath)
    VerifiDirs(absdirpath)
    fullpath = os.path.join(absdirpath, filename)

    bpy.ops.export_scene.fbx(
        filepath=fullpath,
        check_existing=False,
        use_selection=True,
        global_scale=GetObjExportScale(obj),
        object_types={'CAMERA'},
        use_custom_props=addon_prefs.exportWithCustomProps,
        add_leaf_bones=False,
        use_armature_deform_only=obj.exportDeformOnly,
        bake_anim=True,
        bake_anim_use_nla_strips=False,
        bake_anim_use_all_actions=False,
        bake_anim_force_startend_keying=True,
        bake_anim_step=GetAnimSample(obj),
        bake_anim_simplify_factor=obj.SimplifyAnimForExport,
        use_metadata=addon_prefs.exportWithMetaData,
        primary_bone_axis=obj.exportPrimaryBaneAxis,
        secondary_bone_axis=obj.exporSecondaryBoneAxis,
        axis_forward=obj.exportAxisForward,
        axis_up=obj.exportAxisUp,
        bake_space_transform=False)

    #Reset camera scale
    obj.delta_scale *= 100

    exportTime = time.process_time() - curr_time

    MyAsset = originalScene.UnrealExportedAssetsList.add()
    MyAsset.assetName = filename
    MyAsset.assetType = "Camera"
    MyAsset.exportPath = absdirpath
    MyAsset.exportTime = exportTime
    MyAsset.object = obj
    return MyAsset
Exemplo n.º 48
0
# Min  -Z = -50x_1 -20x_2 -25x_3
# s.t. 9x_1 +3x_2 +5x_3 <= 500
#      5x_1 +4x_2 +0x_3 <= 350
#      3x_1 +0x_2 +2x_3 <= 150
#      0 <= x_1
#      0 <= x_2
#      0 <= x_3 <= 20

c = [-50, -20, -25]
A_ub = [[9, 3, 5], [5, 4, 0], [3, 0, 2]]
b_ub = [500, 350, 150]
x1_bounds = (0, None)
x2_bounds = (0, None)
x3_bounds = (0, 20)
bounds = [x1_bounds, x2_bounds, x3_bounds]

res = lp_solver(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
print(res)
print("")
print("Result:", -res.fun)
print("X:     ", res.x)
print("")

tempos = []
for _ in range(100):
    inicio = time.process_time()
    lp_solver(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
    tempos += [time.process_time() - inicio]
print("Tempo de execução:", np.mean(tempos), "+-", np.std(tempos), "segundos")
def time_run(fn, N, A):
    start = process_time()
    fn(N, A)
    end = process_time()
    return end - start
Exemplo n.º 50
0
import time
import math
import random

trials = int(input("Enter the number of trials: "))
t = time.process_time()
inside = 0
for i in range(trials):
    x = random.random()
    y = random.random()
    if (x * x + y * y) < 1.0:
        inside += 1
pi = 4.0 * inside / trials
et = time.process_time() - t
print("pi est =%9.6f error=%9.6f time=%f" % (pi, pi - math.pi, et))
Exemplo n.º 51
0
    return


(n, words, frequency) = init2()
print(len(words))
print(words)

print("\n\n")
print("We have the following terms:")
print(words)

print("\n with the following frequency:")
print(frequency)
print("\n")

t0 = time.process_time()
x = solveDP()
t1 = time.process_time() - t0
print("Time needed with DP", t1, "seconds.")

print("\n")
u0 = time.process_time()
y = solveRecursive(0, n)
u1 = time.process_time() - u0
print("Time needed with Recursion: " + str(u1) + " seconds.")

print("\n")
print("The three calculated search trees the same? " +
      ("yes " if x == y else "no :("))

dictionary = {}
Exemplo n.º 52
0
temps = []

for j in range(0, 5):
    tab = datasets[j][0]

    tab_x = []
    tab_y = []
    tab_col = []
    array = []
    for i in range(0, tab.size):
        tab_x.append(tab[i][0])
        tab_y.append(tab[i][1])
        tab_col.append(tab[i][2])
        array.append([tab[i][0], tab[i][1]])

    begin = time.process_time()
    clusterer = hdbscan.HDBSCAN(min_cluster_size=10)
    cluster_labels = clusterer.fit_predict(array)
    silhouette_avg = silhouette_score(array, cluster_labels)
    graph.append(silhouette_avg)
    t = time.process_time() - begin
    temps.append(t)

    titre = nom_datasets[j]
    plt.title(titre)
    plt.scatter(tab_x, tab_y, c=cluster_labels)
    plt.show()

y_axis = [temps, graph]
y_label = ['Temps', 'Silhouette Avg']
for i in range(0, 2):
Exemplo n.º 53
0
def getGraphs(n):
    # Get all nodes
    raw = db.GetNodes()
    # Oracle Campus Node
    oracleNode = raw[100]
    # Starting point nodes
    startCoords = raw[101:]
    # Remove oracle campus and starting point nodes from the rest
    raw = raw[:50]
    clustering_start_time = time.process_time()
    # Cluster data through k-means
    kmean = cluster(n, raw)
    # Print time it took to cluster the data
    print("Clustering took %s seconds." %
          (time.process_time() - clustering_start_time))
    graph_start_time = time.process_time()
    # Get edges with normalized weights
    edges = db.Normalise()
    # Create a graph for each cluster
    graphList = [graph.Graph() for i in range(n)]
    graphCounter = [0 for i in range(n)]

    # Create all vertices for each graph
    for i in range(len(raw)):
        graphList[kmean.labels_[i]].add_vertex(raw[i][0], raw[i][1],
                                               graphCounter[kmean.labels_[i]])
        graphCounter[kmean.labels_[i]] += 1

    # Create all edges for each graph
    for g in graphList:
        for i in range(len(edges[0])):
            # If both of the edge's vertices exist in the graph, then add that edge to the graph
            if ((g.get_vertex(edges[0][i]) is not None)
                    and (g.get_vertex(edges[1][i]) is not None)):
                g.add_edge(edges[0][i], edges[1][i], edges[2][i])

    for g in graphList:
        print("Vertices for graph:")
        for vert in g.vert_dict:
            print(g.vert_dict[vert])

    # Print time it took to create the complete graphs
    print("Creating graphs and assigning initial points took %s seconds." %
          (time.process_time() - graph_start_time))

    # Plot the clustered nodes
    labeledNodes = [[], [], [], [], []]
    for i in range(0, len(raw)):
        if (kmean.labels_[i] == 0):
            labeledNodes[0].append(raw[i])
        elif (kmean.labels_[i] == 1):
            labeledNodes[1].append(raw[i])
        elif (kmean.labels_[i] == 2):
            labeledNodes[2].append(raw[i])
        elif (kmean.labels_[i] == 3):
            labeledNodes[3].append(raw[i])
        elif (kmean.labels_[i] == 4):
            labeledNodes[4].append(raw[i])

    plt.scatter(*zip(*labeledNodes[0]), color="red")
    plt.scatter(*zip(*labeledNodes[1]), color="blue")
    plt.scatter(*zip(*labeledNodes[2]), color="green")
    plt.scatter(*zip(*labeledNodes[3]), color="yellow")
    plt.scatter(*zip(*labeledNodes[4]), color="orange")
    plt.scatter(-103.4158208, 20.708207, color="black")
    plt.scatter(-103.399223, 20.771859, color="purple")
    plt.scatter(-103.281663, 20.703316, color="purple")
    plt.scatter(-103.247455, 20.620652, color="purple")
    plt.scatter(-103.313528, 20.573827, color="purple")
    plt.scatter(-103.485171, 20.513207, color="purple")
    plt.xlabel("Longitud")
    plt.ylabel("Latitud")
    plt.show()

    return graphList
Exemplo n.º 54
0
def check(X, Y, B, n, m):
    print('\n\tChecking the equation:')
    f1 = m - 1
    f2 = n
    f3 = f1 * f2
    q = 0.05

    student = partial(t.ppf, q=1 - q)
    Tstudent = student(df=f3)

    Gkr = cohren(f1, f2)

    avgY = [round(sum(i) / len(i), 3) for i in Y]
    print('\nMean value of y:', avgY)

    disp = skv(Y, avgY, n, m)
    print('Variance of y:', disp)

    Gp = kriteriaCochrana(Y, avgY, n, m)
    print(f'Gp = {Gp}')
    if Gp < Gkr:
        print(f'With probability {1 - q} the variances are homogeneous.')
    else:
        print("The number of experiments must be increased")
        m += 1
        main(n, m)

    ts = studentKriteria(X[:, 1:], Y, avgY, n, m)
    print("\nStudent's t-criterion:\n", ts)
    res = [t for t in ts if t > Tstudent]
    finalK = [B[i] for i in range(len(ts)) if ts[i] in res]
    print(
        '\nCoefficients {} are statistically insignificant, so they are excluded from the equation.'
        .format([round(i, 3) for i in B if i not in finalK]))

    newY = []
    for j in range(n):
        newY.append(
            regression([X[j][i] for i in range(len(ts)) if ts[i] in res],
                       finalK))

    print(f'\nValues of "y" with coefficients {finalK}')
    print(newY)

    timeStart = process_time()

    d = len(res)
    if d >= n:
        print('\nF4 <= 0')
        print('')
        return
    f4 = n - d

    Fp = kriteriaFisher(Y, avgY, newY, n, m, d)
    fisher = partial(f.ppf, q=0.95)
    Ft = fisher(dfn=f4, dfd=f3)
    print("\nAdequacy check using Fisher's criterion")
    print('Fp =', Fp)
    print('Ft =', Ft)
    if Fp < Ft:
        print('The mathematical model is adequate to the experimental data')
    else:
        print('The mathematical model is not adequate to the experimental data')

    timeEnd = process_time()
    print(
        f"Time to check the adequacy of the system with Fisher's criterion: {timeEnd - timeStart}"
    )
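
# The critical values used above come from scipy.stats (assuming t and f are
# scipy.stats.t and scipy.stats.f, as the ppf calls imply). In isolation they
# can be obtained like this:

from functools import partial
from scipy.stats import t, f

q, f1, f2 = 0.05, 2, 4      # illustrative significance level and degrees of freedom
f3 = f1 * f2

t_crit = partial(t.ppf, q=1 - q)(df=f3)          # Student critical value
F_crit = partial(f.ppf, q=0.95)(dfn=1, dfd=f3)   # Fisher critical value
print(t_crit, F_crit)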
Exemplo n.º 55
0
"""
Menu principal
"""
while True:
    printMenu()
    inputs = input('Select an option to continue\n')

    if int(inputs[0]) == 1:
        print("Initializing catalog ....")
        # cont is the controller used from here on
        cont = controller.initCatalog()

    elif int(inputs[0]) == 2:
        print("Loading data from the files ....")
        t1_start = process_time()  # start time
        controller.loadData(cont, moviesfile, castingfile)
        t1_stop = process_time()  # end time
        print("Execution time ", t1_stop - t1_start, " seconds")
        print('Movies loaded: ' + str(controller.moviesSize(cont)))

    elif int(inputs[0]) == 3:
        producer = input("Which production company's movies to look up?: ")
        t2_start = process_time()  # start time
        moviesproductor = controller.getMoviesByProducer(cont, producer)
        t2_stop = process_time()  # end time
        print("Execution time ", t2_stop - t2_start, " seconds")
        printMoviesByProducer(moviesproductor)

    elif int(inputs[0]) == 4:
        directorname = input("Which director's movies to look up?: ")
Exemplo n.º 56
0
            if move_num >= 0 and move_num < len(legal_moves):
                return legal_moves[move_num]
            else:
                print("That wasn't one of the options.")
        except ValueError:
            print("Please enter an integer as your move choice.")


# main
# The input consists of a first line that is the desired search depth, then
# a board description (see read_board).  The desired output is the value of the board
# according to the evaluation function (with minimax recursion).
#
# Alternatively, if the line of input is "play" instead, we can launch into an actual
# game for demo purposes.
firstline = input("")  # read just one line
if firstline == "play":
    play()
    # sys.exit("Game over")
else:
    try:
        search_depth = int(firstline)
    except ValueError:
        sys.exit("First line was neither 'play' nor a search depth; quitting...")
    print("Calculating...")
    board = read_board()
    start = time.process_time()
    print(minimax_value(board, WHITE_TO_PLAY,
                        search_depth, float("-inf"), float("inf")))
    print("Time taken:", time.process_time() - start)
Exemplo n.º 57
0
    def experiment(self):

        start = time.process_time()
        if self.__clustering:
            _n = len(self.clusters)
            for i, cluster in enumerate(self.clusters, start=1):
                log.debug("Started analyzing cluster: {}/{}".format(i, _n))
                self.current_cluster = self.train_X[cluster]
                cma_es = self.__cma_es()
                self.__results.append(cma_es)
                log.debug("Finished analyzing cluster: {}/{}".format(i, _n))
        else:
            log.debug("Started analyzing train dataset")
            self.current_cluster = self.train_X
            cma_es = self.__cma_es()
            self.__results.append(cma_es)
            log.debug("Finished analyzing train dataset")
        self.time_delta = time.process_time() - start


        log.debug('Creating test X, Y')
        self.test_X, self.test_Y = self.__data_model.test_set()
        if self.__scaler is not None:
            self.test_X = self.__scaler.transform(self.test_X)

        best_train = self.best(X=self.train_X, V=self.valid_X, Y=np.ones(self.train_X.shape[0]))
        V2 = self.__data_model.valid_set2()
        V2 = self.__scaler.transform(V2) if self.__scaler is not None else V2
        best_test = self.best(X=self.test_X, V=V2, Y=self.test_Y)

        database = Database(database_filename='{}.sqlite'.format(self.db))
        experiment = database.new_experiment()

        try:
            experiment['benchmark_mode'] = self.benchmark_mode
            experiment['seed'] = self.__seed
            experiment['n_constraints'] = self.__n_constraints
            experiment['constraints_generator'] = self.__constraints_generator
            experiment['clusters'] = len(self.clusters) if self.__clustering else 0
            experiment['clustering'] = self.__clustering
            experiment['margin'] = self.__margin
            experiment['standardized'] = self.__scaler is not None
            experiment['sigma'] = self.__sigma0
            experiment['name'] = self.__data_model.benchmark_model.name
            experiment['k'] = self.__data_model.benchmark_model.k
            experiment['n'] = self.__dimensions
            experiment['max_iter'] = self.max_iter
            experiment['d'] = self.__data_model.benchmark_model.d

            experiment['tp'] = int(best_test['tp'])
            experiment['tn'] = int(best_test['tn'])
            experiment['fp'] = int(best_test['fp'])
            experiment['fn'] = int(best_test['fn'])
            experiment['f'] = best_test['f']

            experiment['train_tp'] = int(best_train['tp'])
            experiment['train_tn'] = int(best_train['tn'])
            experiment['train_fp'] = int(best_train['fp'])
            experiment['train_fn'] = int(best_train['fn'])
            experiment['train_f'] = best_train['f']

            experiment['time'] = self.time_delta
            experiment['timestamp'] = time.time()

            experiment['positives'] = int(self.test_Y.sum())
            experiment['count_V'] = 2
            
            for i, es in enumerate(self.__results):

                W_start = list(self.split_w(es[8].x0, split_w=True))
                W_start[1] = np.sign(W_start[1])
                W = list(self.split_w(es[0], split_w=True))
                W[1] = np.sign(W[1])
                if self.__scaler is not None:
                    # destandardize
                    W_start[0] /= np.tile(self.__scaler.scale_, self.__n_constraints)
                    W_start[1] += np.sum(np.split(W_start[0] * np.tile(self.__scaler.mean_, self.__n_constraints), self.__n_constraints), axis=1)
                    W[0] /= np.tile(self.__scaler.scale_, self.__n_constraints)
                    W[1] += np.sum(np.split(W[0] * np.tile(self.__scaler.mean_, self.__n_constraints), self.__n_constraints), axis=1)

                cluster = experiment.new_child_data_set('cluster_{}'.format(i))
                cluster['w_start'] = to_str(W_start[0])
                cluster['w0_start'] = to_str(W_start[1])
                cluster["w_start_mathematica"] = self.to_mathematica(W_start)
                cluster['w'] = to_str(W[0])
                cluster['w0'] = to_str(W[1])
                cluster['w_mathematica'] = self.to_mathematica(W)
                cluster['f'] = es[1]

        except Exception as e:
            experiment['error'] = str(e)
            log.error("Cannot process: {}".format(self.sql_params))
            print(e)
        finally:
            experiment.save()