def butter_filter(self, lowcut, highcut=0, order=2, fs=20000.):

        nyq = 0.5 * fs

        if highcut != 0:
            low = lowcut / nyq
            high = highcut / nyq
            b, a = butter(order, [low, high], btype='band', analog=False)
        else:
            low = lowcut / nyq
            b, a = butter(order, low, btype='high', analog=False)

        if isinstance(self.metadata, list):
            progr = FloatProgress(min=0,
                                  max=len(self.metadata),
                                  description='Filtering...',
                                  bar_style='success')
            display(progr)
            cut_data_butter = [None] * len(self.raw_data)
            for i, k in enumerate(self.raw_data):
                cut_data_butter[i] = np.empty(k.shape)
                for j, z in enumerate(self.raw_data[i]):
                    cut_data_butter[i][j] = filtfilt(b, a, z)
                progr.value += 1
            self.butter_data = cut_data_butter
            progr.close()

        if isinstance(self.metadata, dict):
            cut_data_butter = [None] * len(self.raw_data)
            for i, k in enumerate(self.raw_data):
                cut_data_butter[i] = filtfilt(b, a, k)
            self.butter_data = np.asarray(cut_data_butter)
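
A standalone sketch of the same pattern — zero-phase Butterworth filtering of a batch of traces while driving a FloatProgress bar — assuming scipy and ipywidgets are available; the class attributes used above (self.raw_data, self.metadata) are replaced by a plain 2-D array argument here.

import numpy as np
from scipy.signal import butter, filtfilt
from ipywidgets import FloatProgress
from IPython.display import display

def butter_filter_traces(traces, lowcut, highcut=0, order=2, fs=20000.):
    """Band-pass (or high-pass when highcut == 0) filter each row of traces."""
    nyq = 0.5 * fs
    if highcut != 0:
        b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
    else:
        b, a = butter(order, lowcut / nyq, btype='high')

    progr = FloatProgress(min=0, max=len(traces), description='Filtering...')
    display(progr)
    filtered = np.empty_like(traces, dtype=float)
    for i, trace in enumerate(traces):
        filtered[i] = filtfilt(b, a, trace)  # zero-phase filtering, as in the method above
        progr.value += 1
    progr.close()
    return filtered
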
Example #2
def download_file(osm_url,dest):
    if config.get("general","proxy_https")!="":
        urllib2.install_opener(
            urllib2.build_opener(
                urllib2.ProxyHandler({'https': config.get("general","proxy_https")})
            )
        )
        
    file_name = "tempdata/"+osm_url.split('/')[-1]
    print "downloading: "+osm_url
    print "in progress"
    u = urllib2.urlopen(osm_url)
    f = open(file_name, 'wb')
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (file_name, file_size)

    file_size_dl = 0
    block_sz = 8192

    progressbar = FloatProgress(min=0, max=100) # instantiate the bar
    display(progressbar) # display the bar

    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        progressbar.value=file_size_dl * 100. / file_size
    f.close()
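
The snippet above is Python 2 (urllib2, print statements, meta.getheaders). A rough Python 3 sketch of the same progress-tracked download, using only the standard library plus ipywidgets; treat it as an illustration rather than a drop-in replacement.

import os
import urllib.request
from ipywidgets import FloatProgress
from IPython.display import display

def download_file_py3(url, dest_dir="tempdata", block_sz=8192):
    os.makedirs(dest_dir, exist_ok=True)
    file_name = os.path.join(dest_dir, url.split('/')[-1])
    with urllib.request.urlopen(url) as u, open(file_name, 'wb') as f:
        file_size = int(u.headers.get("Content-Length", 0))
        bar = FloatProgress(min=0, max=100)
        display(bar)
        downloaded = 0
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            downloaded += len(buffer)
            f.write(buffer)
            if file_size:
                bar.value = downloaded * 100. / file_size
    return file_name
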
Example #3
def get_gaussian_labels_probabilities(digits, hidden_markov_models, n_observation_classes, n_hidden_states, n_iter, tol, display_progress, use_pickle, filename):

    labels_probabilities = []

    directory = settings.LABELS_PROBABILITIES_DIRECTORY + "centroids_" + str(n_observation_classes - 3)
    directory += "/hidden_states_" + str(n_hidden_states) + "/n_iter_" + str(n_iter) + "/tol_" + str(tol)
    path = directory + "/" + filename

    if use_pickle and os.path.isfile(path):

        labels_probabilities = pickle.load(open(path, 'rb'))

    else:

        f = FloatProgress(min=0, max=100)
        if display_progress:
            display(f)

        i = 0
        for dig in digits:
            probabilites = get_gaussian_label_probabilites(dig, hidden_markov_models)
            labels_probabilities.append(probabilites)
            f.value = (float(i) * 100.0) / float(len(digits))
            i += 1

        f.close()

        if use_pickle:
            if not os.path.exists(directory):
                os.makedirs(directory)
            with open(path,'wb') as f:
                pickle.dump(labels_probabilities,f)

    return labels_probabilities
Example #4
    def show(self, a_progress=None, ext='', p_format="{}:{}:{}%"):
        """
        Main progress-display method.
        :param ext: optional extra text to display, str, defaults to an empty string
        :param a_progress: defaults to None, i.e. the iteration count tracked internally by the class drives the display
        :param p_format: progress display format, defaults to "{}:{}:{}%", i.e. 'self._label:ext:round(self._progress / self._total * 100, 2)%'
        """
        self.progress = a_progress if a_progress is not None else self.progress + 1
        ps = round(self._progress / self._total * 100, 2)

        if self._label is not None:
            # If no label was given at initialization, only the widget progress is shown
            self.f.write('\r')
            self.f.write(p_format.format(self._label, ext, ps))

        if ABuEnv.g_is_ipython:
            if self.progress_widget is None:
                self.progress_widget = FloatProgress(value=0, min=0, max=100)
                display(self.progress_widget)
            self.progress_widget.value = ps

        # Ending this way can leave a remainder; prefer the context manager to control completion
        if self._progress == self._total:
            self.f.write('\r')
            if self.progress_widget is not None:
                self.progress_widget.close()
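
A minimal, self-contained sketch of the idea behind this method — mirror progress both to stdout (carriage-return style) and to a FloatProgress widget — with the ABuEnv notebook check and the format options from the original dropped; the class below is illustrative, not part of the library.

import sys
from ipywidgets import FloatProgress
from IPython.display import display

class TextAndWidgetProgress:
    """Report progress as a rewriting text line and as a widget."""
    def __init__(self, total, label='task'):
        self._total = total
        self._progress = 0
        self._label = label
        self.progress_widget = None

    def show(self, a_progress=None, ext=''):
        self._progress = a_progress if a_progress is not None else self._progress + 1
        ps = round(self._progress / self._total * 100, 2)
        sys.stdout.write('\r{}:{}:{}%'.format(self._label, ext, ps))
        if self.progress_widget is None:
            self.progress_widget = FloatProgress(value=0, min=0, max=100)
            display(self.progress_widget)
        self.progress_widget.value = ps
        if self._progress >= self._total:
            sys.stdout.write('\r')
            self.progress_widget.close()
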
Example #5
def plot_digit_samples(samples, display_progress = False):

    f = FloatProgress(min=0, max=100)
    if display_progress:
        display(f)

    plt.clf();
    _, axarr = plt.subplots(2, 5);

    for i in range(0, 2):
        for j in range(0, 5):

            n = 5*i + j
            n -= 1
            if n < 0:
                n = 9


            x_points = []
            y_points = []

            for curve in samples[n][0].curves:
                for point in curve:
                    x_points.append(point[0])
                    y_points.append(point[1])

            axarr[i, j].plot(x_points, y_points, linewidth = 2.0)
            #axarr[i, j].axis([settings.IMAGE_PLOT_X_MIN, settings.IMAGE_PLOT_X_MAX, settings.IMAGE_PLOT_Y_MIN, settings.IMAGE_PLOT_Y_MAX]);
            f.value += 10

    f.close()
    plt.show();
Example #6
    def predict(self):
        '''
        Iteratively predict values based on available neighbor data
        On each iteration, predictdf attribute is revised
        '''
        self.predictdf = self.targetdf.copy()
        dftest = self.predictdf.copy()
        dftest = dftest[pd.isnull(dftest[self.label])]

        # Set up progressbar
        nullcount = pd.isnull(self.predictdf[self.label]).sum()
        if self.progressbar:
            maxnullcount = nullcount
            f = FloatProgress(min=0, max=maxnullcount)
            display(f)

        while nullcount > 0:
            if ((~pd.isnull(dftest.lead)) &
               (~pd.isnull(dftest.lag))).sum() > 0:
                dftest = self.predict_once(dftest, lead=True, lag=True)

            if (~pd.isnull(dftest.lead)).sum() > 0:
                dftest = self.predict_once(dftest, lead=True)

            if (~pd.isnull(dftest.lag)).sum() > 0:
                dftest = self.predict_once(dftest, lag=True)

            nullcount = pd.isnull(self.predictdf[self.label]).sum()
            print(nullcount)
            if self.progressbar:
                f.value = maxnullcount - nullcount
Example #7
    def __init__(self, total):
        super().__init__()
        from ipywidgets import FloatProgress
        from IPython.display import display

        self.progress = FloatProgress(min=0, max=total)
        display(self.progress)
Example #8
    def create_features_as_matrix(self, samples, show_progress_bar=False):
        '''
        Creates features for all the given sample objects.
        @return: The created features, as a float numpy matrix (shape: n_samples X n_features).
        '''

        if show_progress_bar:

            from IPython.display import display
            from ipywidgets import FloatProgress

            progress_bar = FloatProgress(min=0, max=len(samples) - 1)
            display(progress_bar)

        feature_matrix = np.empty((len(samples), self.n_features()),
                                  dtype=np.float64)

        for i, sample in enumerate(samples):

            self.create_features_into_array(feature_matrix[i, :], sample)

            if show_progress_bar:
                progress_bar.value = i

        return feature_matrix
    def __init__(
        self,
        training_length: Any = None,
        update_interval: int = 100,
        bar_length: int = 50,
        out: Any = sys.stdout,
    ):
        self._training_length = training_length
        if training_length is not None:
            self._init_status_template()
        self._update_interval = update_interval
        self._recent_timing: List[Tuple[float, float, float]] = []

        self._total_bar = FloatProgress(description='total',
                                        min=0,
                                        max=1,
                                        value=0,
                                        bar_style='info')
        self._total_html = HTML()
        self._epoch_bar = FloatProgress(description='this epoch',
                                        min=0,
                                        max=1,
                                        value=0,
                                        bar_style='info')
        self._epoch_html = HTML()
        self._status_html = HTML()

        self._widget = VBox([
            HBox([self._total_bar, self._total_html]),
            HBox([self._epoch_bar, self._epoch_html]), self._status_html
        ])
class IPyBackend(ProgressBar):
    def __init__(self,
                 iterable=None,
                 length=None,
                 *,
                 label=None,
                 show_eta=True,
                 show_percent=None,
                 show_pos=False,
                 item_show_func=None,
                 info_sep=' '):
        from traitlets import TraitError
        try:
            from ipywidgets import FloatProgress
        except ImportError:
            from IPython.html.widgets.widget_float import FloatProgress

        try:
            self.backend = FloatProgress(value=0, min=0, step=1)
            # max and description are set via properties
        except TraitError:
            raise RuntimeError('IPython notebook needs to be running')

        super().__init__(iterable,
                         length,
                         label=label,
                         show_eta=show_eta,
                         show_percent=show_percent,
                         show_pos=show_pos,
                         item_show_func=item_show_func,
                         info_sep=info_sep)

        self.is_hidden = False

    def __enter__(self):
        from IPython.display import display
        display(self.backend)
        return super().__enter__()

    def render_finish(self):
        self.backend.close()

    def render_progress(self):
        info_bits = []
        if self.show_pos:
            info_bits.append(self.format_pos())
        if self.show_percent or (self.show_percent is None
                                 and not self.show_pos):
            info_bits.append(self.format_pct())
        if self.show_eta and self.eta_known and not self.finished:
            info_bits.append(self.format_eta())
        if self.item_show_func is not None:
            item_info = self.item_show_func(self.current_item)
            if item_info is not None:
                info_bits.append(item_info)

        self.backend.description = '{} {}'.format(
            self.label or '', self.info_sep.join(info_bits))
        self.backend.max = self.length
        self.backend.value = self.pos
Example #11
    def __init__(self,
                 iterable=None,
                 length=None,
                 *,
                 label=None,
                 show_eta=True,
                 show_percent=None,
                 show_pos=False,
                 item_show_func=None,
                 info_sep=' '):
        from IPython import get_ipython

        try:
            from ipywidgets import FloatProgress
        except ImportError:
            from IPython.html.widgets.widget_float import FloatProgress

        ipython = get_ipython()
        if not ipython or ipython.__class__.__name__ != 'ZMQInteractiveShell':
            raise RuntimeError('IPython notebook needs to be running')

        self.backend = FloatProgress(value=0, min=0, step=1)
        # max and description are set via properties

        super().__init__(iterable,
                         length,
                         label=label,
                         show_eta=show_eta,
                         show_percent=show_percent,
                         show_pos=show_pos,
                         item_show_func=item_show_func,
                         info_sep=info_sep)

        self.is_hidden = False
    def __init__(
        self,
        training_length=None,
        update_interval=100,
    ):
        self._training_length = training_length
        self._status_template = None
        self._update_interval = update_interval
        self._recent_timing = []

        self.desc_total = Label("Total:")
        self.desc_total.layout.width = "100px"
        self.pbar_total = FloatProgress(min=0, max=1.0, bar_style="success")
        self.text_total = Label("0%")
        self.desc_total.layout.padding = "5px"
        self.text_total.layout.padding = "5px"
        display(HBox([self.desc_total, self.pbar_total, self.text_total]))

        self.desc_epoch = Label("This epoch:")
        self.desc_epoch.layout.width = "100px"
        self.pbar_epoch = FloatProgress(min=0, max=1.0)
        self.text_epoch = Label("0%")
        self.desc_epoch.layout.padding = "5px"
        self.text_epoch.layout.padding = "5px"
        display(HBox([self.desc_epoch, self.pbar_epoch, self.text_epoch]))

        self.epoch_report = Label("")
        self.time_report = Label("")
        self.epoch_report.layout.padding = "5px"
        self.time_report.layout.padding = "5px"
        display(VBox([self.epoch_report, self.time_report]))
    def __init__(self,
                 iterable=None,
                 length=None,
                 *,
                 label=None,
                 show_eta=True,
                 show_percent=None,
                 show_pos=False,
                 item_show_func=None,
                 info_sep=' '):
        from traitlets import TraitError
        try:
            from ipywidgets import FloatProgress
        except ImportError:
            from IPython.html.widgets.widget_float import FloatProgress

        try:
            self.backend = FloatProgress(value=0, min=0, step=1)
            # max and description are set via properties
        except TraitError:
            raise RuntimeError('IPython notebook needs to be running')

        super().__init__(iterable,
                         length,
                         label=label,
                         show_eta=show_eta,
                         show_percent=show_percent,
                         show_pos=show_pos,
                         item_show_func=item_show_func,
                         info_sep=info_sep)

        self.is_hidden = False
    def build_stats_widget(self):
        from ipywidgets import FloatProgress, HBox, VBox, HTML, Layout, Button, Box
        loss_text = HTML('Loss', width='140px')
        self.loss_bar = FloatProgress(min=0.0,
                                      max=1.0,
                                      description='',
                                      height='10px')
        loss_widget = HBox([loss_text, self.loss_bar], width='100%')

        acc_text = HTML('Accuracy', width='140px')
        self.acc_bar = FloatProgress(min=0,
                                     max=1.0,
                                     description='',
                                     height='10px')
        acc_widget = HBox([acc_text, self.acc_bar], width='100%')

        box_layout = Layout(display='flex',
                            flex_flow='row',
                            align_items='stretch',
                            justify_content='space-around',
                            border='1px solid #48A7F2',
                            width='100%')

        return Box(children=[acc_widget, loss_widget],
                   layout=box_layout,
                   box_style='info')
    def solve(self, r0):
        """Trace rays through the turbulent grid

        Args:
            r0 (4xN float): array of N rays, in their initial configuration
        """
        f = FloatProgress(min=0,
                          max=self.ne_grid.shape[0],
                          description='Progress:')
        display(f)

        self.r0 = r0  # keep the original
        dz = self.z[1] - self.z[0]
        DZ = Z1(dz)  # matrix to push rays by dz

        rt = r0.copy()  # iterate to save memory, starting at r0

        for i, ne_slice in enumerate(self.ne_grid):
            f.value = i

            gx, gy = gradient_interpolator(ne_slice, self.x, self.y)
            rr1 = deflect_rays(rt, gx, gy, dz=dz)
            rt = transform(DZ, rr1)

        self.rt = rt
Example #16
def plot_digits_heatmap(digits, display_progress = False):

    f = FloatProgress(min=0, max=100)
    if display_progress:
        display(f)

    plt.clf();
    _, axarr = plt.subplots(2, 5);

    for i in range(0, 2):
        for j in range(0, 5):

            n = 5*i + j

            x_points = []
            y_points = []
            for digit in digits:
                if digit.label == n:
                    for curve in digit.curves:
                        for point in curve:
                            x_points.append(point[0])
                            y_points.append(point[1])

            heatmap, xedges, yedges = np.histogram2d(x_points, y_points, bins=50);

            extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]];

            axarr[i, j].imshow(np.rot90(heatmap), extent=extent);
            axarr[i, j].axis([settings.IMAGE_PLOT_X_MIN, settings.IMAGE_PLOT_X_MAX, settings.IMAGE_PLOT_Y_MIN, settings.IMAGE_PLOT_Y_MAX]);
            f.value += 10

    f.close()
    plt.show();
Example #17
def plot_digit(digit, display_progress = False):

    fig=plt.figure()
    ax=fig.add_subplot(111)

    f = FloatProgress(min=0, max=100)
    if display_progress:
        display(f)

    n_points = 0
    for curve in digit.curves:
        n_points += len(curve)

    i = 0
    for curve in digit.curves:
        x_points = []
        y_points = []
        for point in curve:
            x_points.append(point[0])
            y_points.append(point[1])
            f.value = 100.0*(float(i) / float(n_points))
            i += 1

        plt.plot(x_points, y_points, linewidth = 2.0)
    f.close()

    plt.axis([settings.IMAGE_PLOT_X_MIN, settings.IMAGE_PLOT_X_MAX, settings.IMAGE_PLOT_Y_MIN, settings.IMAGE_PLOT_Y_MAX])
    plt.show()
Example #18
    def predict(self):
        '''
        Iteratively predict values based on available neighbor data
        On each iteration, predictdf attribute is revised
        '''
        self.predictdf = self.targetdf.copy()
        dftest = self.predictdf.copy()
        dftest = dftest[pd.isnull(dftest[self.label])]

        # Set up progressbar
        nullcount = pd.isnull(self.predictdf[self.label]).sum()
        if self.progressbar:
            maxnullcount = nullcount
            f = FloatProgress(min=0, max=maxnullcount)
            display(f)

        while nullcount > 0:
            if ((~pd.isnull(dftest.lead)) &
                (~pd.isnull(dftest.lag))).sum() > 0:
                dftest = self.predict_once(dftest, lead=True, lag=True)

            if (~pd.isnull(dftest.lead)).sum() > 0:
                dftest = self.predict_once(dftest, lead=True)

            if (~pd.isnull(dftest.lag)).sum() > 0:
                dftest = self.predict_once(dftest, lag=True)

            nullcount = pd.isnull(self.predictdf[self.label]).sum()
            print(nullcount)
            if self.progressbar:
                f.value = maxnullcount - nullcount
Example #19
def _compute_current_density(bvs, gvx, gvy, gvz, cmatr, cmati, occvec, verbose=True):
    """Compute the current density in each cartesian direction."""
    nbas, npts = bvs.shape
    curx = np.zeros(npts, dtype=np.float64)
    cury = np.zeros(npts, dtype=np.float64)
    curz = np.zeros(npts, dtype=np.float64)
    cval = np.zeros(nbas, dtype=np.float64)
    if verbose:
        fp = FloatProgress(description='Computing:')
        display(fp)
    for mu in range(nbas):
        if verbose:
            fp.value = mu / nbas * 100
        crmu = cmatr[mu]
        cimu = cmati[mu]
        bvmu = bvs[mu]
        gvxmu = gvx[mu]
        gvymu = gvy[mu]
        gvzmu = gvz[mu]
        for nu in range(nbas):
            crnu = cmatr[nu]
            cinu = cmati[nu]
            bvnu = bvs[nu]
            gvxnu = gvx[nu]
            gvynu = gvy[nu]
            gvznu = gvz[nu]
            cval = evaluate('-0.5 * (occvec * (crmu * cinu - cimu * crnu))', out=cval)
            csum = cval.sum()
            evaluate('curx + csum * (bvmu * gvxnu - gvxmu * bvnu)', out=curx)
            evaluate('cury + csum * (bvmu * gvynu - gvymu * bvnu)', out=cury)
            evaluate('curz + csum * (bvmu * gvznu - gvzmu * bvnu)', out=curz)
    if verbose:
        fp.close()
    return curx, cury, curz
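
The evaluate calls above appear to be numexpr.evaluate (string expressions with an out= target). A tiny standalone illustration of that in-place accumulation pattern, assuming numexpr is installed:

import numpy as np
from numexpr import evaluate

curx = np.zeros(5)
bvmu = np.arange(5.0)
gvxnu = np.ones(5)
csum = 2.0
# accumulate into curx in place, exactly like the loop body above
evaluate('curx + csum * (bvmu * gvxnu)', out=curx)
print(curx)  # [0. 2. 4. 6. 8.]
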
def evaluate(self, df, is_training, batch_size, sess, dropout_prob=0.2):
    X = get_feature_X(df,maxlen)
    Y = pd.get_dummies(df.is_duplicate)
    sess = self.sess
    start_index = 0
    final_loss = 0
    final_acc = 0
    current_total_trained = 0
    p_bar = FloatProgress(min=0, max=1.0)  # values assigned below are fractions of the dataset, so cap the bar at 1.0
    display(p_bar)
    start_time = time.time()
    while start_index < X[0].shape[0]:
        temp_x1 = X[0][start_index:start_index+batch_size]
        temp_x2 = X[1][start_index:start_index+batch_size]
        temp_seq_len1 = X[2][start_index:start_index+batch_size]
        temp_seq_len2 = X[3][start_index:start_index+batch_size]
        test_y = Y[start_index:start_index+batch_size]

        feed_dict = {
            self.min_mask1: get_init_min_mask_value(temp_seq_len1),
            self.min_mask2: get_init_min_mask_value(temp_seq_len2),
            self.seq_length1: temp_seq_len1,
            self.seq_length2: temp_seq_len2,
            self.input: temp_x1,
            self.input2: temp_x2,
            self.y: test_y
        }
        
        if is_training:
            feed_dict[self.prob] = 1 - dropout_prob
        
        current_total_trained += temp_x1.shape[0]

        if is_training:
            # the exact output you're looking for:
            _, c, ac =  sess.run([self.optimizer, self.loss, self.acc], feed_dict=feed_dict)
            final_loss += c * temp_x1.shape[0]
            final_acc += ac * temp_x1.shape[0]
            #print("%s/%s training loss %s"  % (start_index, X[0].shape[0], final_loss/current_total_trained))
#             sys.stdout.write("\r%s/%s training loss %s"  % (start_index, X[0].shape[0], c))
#             sys.stdout.flush()
            duration = time.time() - start_time
            speed = duration/current_total_trained
            eta = (X[0].shape[0]-current_total_trained)*speed
            p_bar.value = current_total_trained/X[0].shape[0]
            p_bar.description = "%s/%s, eta %s sec"%(current_total_trained, X[0].shape[0], eta)
        else:
            c, ac, pred, real =  sess.run([self.loss, self.acc, self.output, self.y], feed_dict=feed_dict)
            final_loss += c * temp_x1.shape[0]
            final_acc += ac * temp_x1.shape[0]
            # print('real:', real)
            # print('pred:', pred)
            print(sum(np.argmax(real, axis=1)==np.argmax(pred, axis=1)))
        start_index += batch_size
        
    final_loss = final_loss/X[0].shape[0]
    final_acc = final_acc/X[0].shape[0]
    return final_loss, final_acc
def paint_in(contours, image):
    # Create an empty array the size of the original image
    painted_in = np.zeros_like(image)
    # Create the progress bar
    f = FloatProgress(min=0, max=len(contours)-1)
    display(f)
    # Make a list of lists of painted in points for every column of the image.
    # Those function as limits - we paint from a certain position up to the
    # closest one of these
    limits = [[] for i in image[0]]
    # Paint in every contour
    for n, contour in enumerate(contours):
        # Go through each point of the contour
        for i in range(len(contour)):
            # If colour is -1 by the end of the forthcoming ifs,
            # that means the direction in which the contour is going
            # is too ambiguous to use
            colour = -1
            # Determine if the contour is going left of right.
            # This uses a very convenient aspect of skimage's contour-finding
            # function - they're either clockwise or anticlockwise depending
            # on the colour they enclose.
            # Note that we usually compare the point before and the point
            # after, to get a general trend at that position.
            direction = contour[(i+1) % len(contour), 1]-contour[i-1, 1]
            if direction > 0:
                colour = 0
            elif direction < 0:
                colour = 1
            else:
                # If the x coordinate doesn't change, perform other checks:
                # This calculates the clockwise or anticlockwise direction
                direction = ((contour[i, 1]-contour[i-1, 1])*(contour[i, 0]+contour[i-1, 0])
                             + (contour[(i+1) % len(contour), 1]-contour[i, 1])*(contour[(i+1) % len(contour), 0]+contour[i, 0]))
                # Check that the y coordinate changes
                if contour[(i+1) % len(contour), 0]-contour[i-1, 0]:
                    if direction > 0:
                        colour = 1
                    elif direction <= 0:
                        colour = 0
            # If we have established what colour we want, paint the pixels
            # above this one
            if colour != -1:
                # Establish the painting limit, which is the highest value in
                # paint_limit for this column that is below the current pixel
                paint_limit = 0
                for limit in limits[contour[i, 1]]:
                    if limit < contour[i, 0] and paint_limit < limit:
                        paint_limit = limit
                # Paint in
                painted_in[paint_limit+1:contour[i, 0], contour[i, 1]] = colour
                # Add this pixel to the limit list
                limits[contour[i, 1]].append(contour[i, 0])
                # Paint this pixel white, so that the contours are always white
                painted_in[contour[i, 0], contour[i, 1]] = 1
        f.value = n
    # Return the finished image
    return painted_in
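
A toy illustration of calling paint_in, assuming the contours come from skimage.measure.find_contours on a binary image and are cast to integer pixel coordinates first (the function indexes the image with them); this is a hedged sketch, not taken from the original source.

import numpy as np
from skimage import measure

image = np.zeros((64, 64))
image[16:48, 16:48] = 1  # a filled square as toy input
contours = [c.astype(int) for c in measure.find_contours(image, 0.5)]
filled = paint_in(contours, image)
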
Example #22
def iter_progress(it, n):

    f = FloatProgress(min=0, max=n)
    display(f)

    for x in it:
        yield x
        f.value += 1
        f.description = f'{int(100*f.value/n)}%'
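
Typical usage of iter_progress, wrapping any iterable whose length is known in advance (this assumes FloatProgress and display are imported as in the surrounding examples):

import time

for item in iter_progress(range(50), 50):
    time.sleep(0.05)  # stand-in for real per-item work
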
Example #23
def noaa_spider(url, word, maxPages):
    """ Return something. """

    if not os.path.isdir('data'):
        os.mkdir('data')

    pagesToVisit = [url]
    textfiles = []
    numberVisited = 0
    foundWord = False
    urlsVisited = set()
    foundFiles = set()

    progressBar = FloatProgress(min=0, max=maxPages)
    display(progressBar)
    progressBar.value = 0

    # The main loop. Create a LinkParser and get all the links on the page.
    # Also search the page for the word or string
    # In our getLinks function we return the web page
    # (this is useful for searching for the word)
    # and we return a set of links from that web page
    # (this is useful for where to go next)
    while numberVisited < maxPages and pagesToVisit != [] and not foundWord:

        # Start from the beginning of our collection of pages to visit:
        url = pagesToVisit[0]

        pagesToVisit = pagesToVisit[1:]
        # try:
        #print(numberVisited, "Visiting:", url)
        parser = LinkParser()

        if url not in urlsVisited:
            urlsVisited.add(url)
            if '.txt' in url:
                if word in url:
                    textfiles = textfiles + [url]
                    foundFiles.add(url)
                    print("FOUND ", url)
                    name = './data/' + url.split('/')[-1]

                    if not os.path.isfile(name):
                        print('downloading...', name)
                        urlretrieve(url, name)
                    else:
                        print('file exists...', name)
            else:
                numberVisited = numberVisited + 1
                progressBar.value = numberVisited
                data, links = parser.getLinks(url)
                # Add the pages that we visited to the end of our collection
                # of pages to visit:
                pagesToVisit = pagesToVisit + links
    return foundFiles
Example #24
def slice_mrc_stack(mrc,
                    scratch,
                    scanshape,
                    optx,
                    opty,
                    startframe=0,
                    wx=500,
                    wy=500):
    """
    Slice the *.mrc movie into all of its subframes

    Accepts:
        mrc         (MrcMemmap) memory map into the mrc file (such as opened by
                    py4DSTEM.file.io.read(...,load='relativity'))
        scratch     (str) path to a scratch file where a numpy memmap containing the re-sliced stack
                    will be buffered
                    NOTE! this will overwrite whatever file is at this path! be careful!
                    ALSO NOTE! this file is where the data in the DataCube will actually live!
                    Either save the DataCube as a py4DSTEM *.h5 or use separate scratches for
                    different data!
        scanshape   (numpy array) 2-element array containing the scan shape (Rx, Ry)
        optx, opty  (numpy meshgrids) the optimized centers of the subframes from subframeAlign(...)
        wx,wy       (ints) subframe sizes x and y

    Returns:
        dc          (DataCube) a py4DSTEM DataCube containing the sliced up stack, in the correct order
    """

    nframe = scanshape.prod() // optx.size

    dshape = (int(nframe), int(optx.size), wx, wy)

    vstack = np.memmap(scratch, mode='w+', dtype='<i2', shape=dshape)

    f = FloatProgress(min=0, max=nframe - 1)
    display(f)

    t0 = time()

    for i in np.arange(startframe, startframe + nframe):
        f.value = i - startframe
        frame = mrc.data[int(i), :, :]
        stack = slice_subframes(frame, optx, opty, wx, wy)
        vstack[int(i - startframe), :, :, :] = np.transpose(stack, (2, 0, 1))

    t = time() - t0

    print("Sliced {} diffraction patterns in {}h {}m {}s".format(
        scanshape.prod(), int(t / 3600), int(t / 60), int(t % 60)))
    mrc.close()

    dc = DataCube(vstack)
    dc.set_scan_shape(scanshape[0], scanshape[1])

    return dc
        def direct_read(self, path, start, stop, modified):
            # Generate an array with the length of the clean indices and broadcast the data directly into array.
            #-> The slicing is very time consuming for many datapoints (>100000)
            if isinstance(self.dict, File):
                electrode_info = np.asarray(self.dict['mapping']['channel',
                                                                 'electrode'])
                mask = electrode_info['electrode'] != -1
                clean_rel_inds = electrode_info['channel'][mask]

                traces = np.empty((len(clean_rel_inds), stop - start))
                self.metadata['DAC'] = np.empty([stop - start])
                self.dict['sig'].read_direct(traces[:, :],
                                             source_sel=np.s_[clean_rel_inds,
                                                              start:stop])
                self.dict['sig'].read_direct(self.metadata['DAC'][:],
                                             source_sel=np.s_[1024,
                                                              start:stop])
                print(self.metadata['DAC'].shape)
                self.metadata['time'] = np.arange(0, (stop - start) / 20000.,
                                                  1 / 20000.)

            if isinstance(self.dict, list):
                electrode_info = [
                    np.asarray(i['mapping']['channel', 'electrode'])
                    for i in self.dict
                ]
                mask = [i['electrode'] != -1 for i in electrode_info]
                clean_rel_inds = [
                    i[0]['channel'][i[1]] for i in zip(electrode_info, mask)
                ]

                traces = [
                    np.empty((len(i), stop - start)) for i in clean_rel_inds
                ]

                progr = FloatProgress(min=0,
                                      max=len(self.dict),
                                      description='Importing...',
                                      bar_style='success')
                display(progr)

                for i, v in enumerate(zip(traces, clean_rel_inds)):
                    self.dict[i]['sig'].read_direct(
                        v[0], source_sel=np.s_[v[1], start:stop])
                    self.metadata[i]['DAC'] = np.empty([stop - start])
                    self.dict[i]['sig'].read_direct(
                        self.metadata[i]['DAC'][:],
                        source_sel=np.s_[1024, start:stop])
                    self.metadata[i]['time'] = np.arange(
                        0, (stop - start) / 20000., 1 / 20000.)
                    progr.value += 1
                progr.close()

            return traces
Example #26
    def abel_invert(self, y_lim, x_range, parameters=None, model=None):
        if model is None:
            # Create the lmfit model
            model = GaussianModel()
            model += ConstantModel()
            params = model.make_params()
            params['c'].set(0.45)
            params['center'].set(0, vary=False)
            params['sigma'].set(min=0.001)
        if parameters is not None:
            for key, value in parameters.items():
                params[key].set(**value)

        f = FloatProgress(min=0.3, max=4.5)
        display(f)

        fit_data = []
        abel_data = []

        xx = x_range
        self.abel_extent = [-xx, xx, y_lim[0], y_lim[1]]
        for yy in np.arange(y_lim[0], y_lim[1], 1 / self.scale):
            f.value = yy
            self.create_lineout(start=(yy, -xx),
                                end=(yy, xx),
                                lineout_width_mm=1 / self.scale)
            # The data obtained by the lineout
            y = self.lo
            x = self.mm
            out = model.fit(y, params, x=x)

            fit_data.append(out.best_fit)
            abel_data.append(
                self.abel_gauss(x, out.best_values['sigma'],
                                out.best_values['amplitude']) *
                10)  #*10 converts from mm^-1 to cm^-1
        # Change the lists to numpy arrays and flip them
        fit_data = np.array(fit_data)[::-1]
        abel_data = np.array(abel_data)[::-1]
        extent = [-x_range, x_range, y_lim[0], y_lim[1]]
        origin = [
            int(len(fit_data) + y_lim[0] * self.scale),
            int(len(fit_data[0]) / 2)
        ]
        self.fit = DMFromArray(fit_data,
                               self.scale,
                               extent=extent,
                               origin=origin)
        self.abel = DMFromArray(abel_data,
                                self.scale,
                                extent=extent,
                                origin=origin)
        return self.fit, self.abel
Example #27
def frech(LINES, mat, n_cores=4, modulo=1):
    PQ = np.moveaxis(LINES, 1, 2).copy()
    num_lines = PQ.shape[0]  # assumption: one trajectory per row of LINES (num_lines is not defined elsewhere in this snippet)
    #     PQ = PQ.reshape(PQ.shape[0], PQ.shape[1], 1, PQ.shape[2]).copy()

    futures = []
    progressbar = FloatProgress(min=0, max=100)
    display(progressbar)
    with ProcessPoolExecutor(n_cores) as executor:
        for j in range(num_lines):
            #             iterr = 0
            for i in range(j + 1, num_lines):
                if (i - j - 1) % modulo == 0:
                    #                     print(i)
                    futures.append(
                        executor.submit(disc_frech_wrap,
                                        PQ,
                                        j,
                                        i,
                                        modulo=modulo))
                elif num_lines - i < modulo:
                    futures.append(
                        executor.submit(disc_frech_wrap,
                                        PQ,
                                        j,
                                        i,
                                        modulo=modulo))
                    break

#                 iterr += 1
            if j % int(num_lines / 100) == 0:
                progressbar.value += 1

    progressbar2 = FloatProgress(min=0, max=int(len(futures) / 100))
    display(progressbar2)
    count = 0
    for p in as_completed(futures):
        count += 1
        if count % int(len(futures) / 100) == 0: progressbar2.value += 1
        try:
            ma, k, r = p.result()
            #             ind = 0
            #             print(ma.shape)
            lines_left = num_lines - r
            lim = modulo if lines_left > modulo else (num_lines - k - 1)
            for l in range(lim):
                mat[k, r + l] = ma[l].copy()
                mat[r + l, k] = mat[k, r + l]
#                 ind += 1
        except IndexError:
            pass
    print('Finished processing ', count * modulo, ' distances')
    return mat
Example #28
def compute_atom_two_out_of_core(hdfname, uni, a, **kwargs):
    """
    Perform an out of core periodic two body calculation for a simple cubic
    unit cell with dimension a.

    All data will be saved to an HDF5 file with the given filename. Key
    structure is per frame, i.e. ``frame_fdx/atom_two``.

    Args:
        hdfname (str): HDF file name
        uni (:class:`~exatomic.core.universe.Universe`): Universe
        a (float): Simple cubic unit cell dimension
        kwargs: Keyword arguments for bond computation (i.e. covalent radii)

    See Also:
        :func:`~exatomic.core.two._compute_bonds`
    """
    store = pd.HDFStore(hdfname, mode="a")
    unit_atom = uni.atom[['symbol', 'x', 'y', 'z', 'frame']].copy()
    unit_atom['symbol'] = unit_atom['symbol'].astype(str)
    unit_atom['frame'] = unit_atom['frame'].astype(int)
    unit_atom.update(uni.unit_atom)
    grps = unit_atom.groupby("frame")
    n = len(grps)
    fp = FloatProgress(description="AtomTwo to HDF:")
    display(fp)
    for i, (fdx, atom) in enumerate(grps):
        v = pdist_ortho(atom['x'].values, atom['y'].values, atom['z'].values,
                        a, a, a, atom.index.values, a)
        tdf = pd.DataFrame.from_dict({
            'frame': np.array([fdx] * len(v[0]), dtype=int),
            'dx': v[0], 'dy': v[1], 'dz': v[2], 'dr': v[3],
            'atom0': v[4], 'atom1': v[5], 'projection': v[6]
        })
        _compute_bonds(uni.atom[uni.atom['frame'] == fdx], tdf, **kwargs)
        store.put("frame_" + str(fdx) + "/atom_two", tdf)
        fp.value = i / n * 100
    store.close()
    fp.close()
Example #29
def __create_exp_progress_box(name, exp_progress, rep_progress, show_full_progress=False):
    exp_progress_layout = Layout(display='flex', flex_flow='column', align_items='stretch', width='100%')
    exp_progress_bar = HBox([FloatProgress(value=exp_progress, min=.0, max=1., bar_style='info'), Label(name)])

    if show_full_progress:
        rep_progress_layout = Layout(display='flex', flex_flow='column', align_items='stretch',
                                     align_self='flex-end', width='80%')

        items = [FloatProgress(value=p, min=.0, max=1., description=str(i)) for i, p in enumerate(rep_progress)]
        rep_progress_box = Box(children=items, layout=rep_progress_layout)

        return Box(children=[exp_progress_bar, rep_progress_box], layout=exp_progress_layout)
    else:
        return exp_progress_bar
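
For illustration, the helper above could be rendered like this (the values are made up, and the ipywidgets names it uses — FloatProgress, HBox, Box, Label, Layout — are assumed to be imported in the defining module):

from IPython.display import display

box = __create_exp_progress_box('experiment-1', exp_progress=0.6,
                                rep_progress=[1.0, 0.75, 0.1],
                                show_full_progress=True)
display(box)
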
Example #30
def _counter_nb(items, tot=None):
    from ipywidgets import FloatProgress, FloatText
    from IPython.display import display

    if tot is not None:
        f = FloatProgress(min=0, max=tot)
    else:
        f = FloatText()
        f.value = 0
    display(f)

    for ii, item in enumerate(items):
        f.value += 1
        yield item
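
A small usage sketch: _counter_nb yields the items unchanged while counting them off on the widget, so it can wrap any loop over a sized or unsized iterable.

results = []
for row in _counter_nb(range(200), tot=200):
    results.append(row * 2)  # stand-in for real per-item processing
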
def transect(sources,
             X,
             Y,
             Nmc,
             t_e,
             pmap,
             v_x=0.1,
             clock_drift=False,
             e_dt=0.01,
             x0=None,
             new_method=False):
    RMS_t = np.zeros((len(X)))
    BiasX_t = np.zeros((len(X)))
    Success_t = np.zeros((Nmc, len(X)))

    r = receiver(X[0], Y, e_dt=e_dt)
    r_dt = r.dt

    f = FloatProgress(value=0.,
                      min=0.,
                      max=100.,
                      step=1.,
                      orientation='horizontal',
                      description='Loading :')
    display(f)

    for i in range(len(X)):

        f.value = i / len(X) * 100.

        # init a receiver
        r = receiver(X[i], Y, e_dt=e_dt, v_x=v_x)
        #r.dt = r_dt # unchanged variable during simulations
        #
        d_rms, bias_x, su = simu(r,
                                 sources,
                                 Nmc,
                                 t_e=t_e,
                                 t_drift=clock_drift,
                                 pmap=pmap,
                                 x0=x0,
                                 new_method=new_method)

        RMS_t[i] = d_rms
        BiasX_t[i] = bias_x
        Success_t[:, i] = su

    f.value = 100.

    return RMS_t, BiasX_t, Success_t
Example #32
def torus_dat(kp, kq, refine=300, segm=40, tR=1.6, tr=0.6):
    spt, spp, spq, spr, spR = sp.symbols("t p q r R", real=True)

    c = sp.Matrix([(spR+spr*sp.cos(2*sp.pi*spq*spt))*sp.cos(2*sp.pi*spp*spt),\
         (spR+spr*sp.cos(2*sp.pi*spq*spt))*sp.sin(2*sp.pi*spp*spt),\
          spr*sp.sin(2*sp.pi*spq*spt)])

    dc = sp.Matrix([sp.diff(x,spt) for x in c]) # derivative
    ldc = sp.sqrt(sum( [ x**2 for x in dc ] )).simplify() # speed
    udc = dc/ldc

    ## 2nd order
    kc = sp.Matrix([sp.diff(x,spt) for x in udc]) # curvature vector
    ks = sp.sqrt(sum( [ x**2 for x in kc])) # curvature scalar
    ukc = kc/ks # unit curvature vector

    ## bi-normal
    bnc = udc.cross(ukc) # cross of unit tangent and unit curvature.

    ## the parametrization of the boundary of the width w tubular neighbourhood
    spw, spu = sp.symbols("w, u", real=True) ## width of torus knot, and meridional parameter

    tSurf = c + spw*sp.cos(2*sp.pi*(spu+kp*kq*spt))*ukc + spw*sp.sin(2*sp.pi*(spu+kp*kq*spt))*bnc

    ## (b) ufuncify
    from sympy.utilities.autowrap import ufuncify
    knotSuf = [ufuncify([spt, spp, spq, spr, spR, spw, spu], tSurf[i]) for i in range(3)]
    knotSnp = sp.lambdify((spt, spp, spq, spr, spR, spw, spu), tSurf, "numpy" )

    kt = (np.pi*tr) / (4*kp) # knot radial thickness: 2*pi*tr is the circumference and kp strands pass through, so this
    ## should be around 2*pi*tr; it would be 2*kp*kt for the knot to fill the surface, i.e. kt = pi*tr / (4*kp).
    ## Make it bigger or smaller depending on how much empty space one wants to see.

    seg = kp*refine ## segments along length of pq torus knot. kp*120 gives a fairly smooth image.

    def surf(i,j): ## lambdify
        return np.array(knotSnp(float(i)/seg, kp, kq, tr, tR, kt, float(j)/segm)).ravel()


    fp = FloatProgress(min=0, max=100, description="Knot data")
    display(fp)  # progress indicator

    xyz = np.ndarray( (seg+1, segm+1, 3) )
    for i,j in it.product( range(seg+1), range(segm+1) ):
        ## put the affine reparametrization here. 
        xyz[i,j] = surf(i,j)
        fp.value = int(100*i/(seg+1))
        
    fp.close()
    return(xyz)
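
A quick sanity check of the output shape for a trefoil (kp=2, kq=3) with reduced refinement, assuming the module-level imports the function relies on (sympy as sp, numpy as np, itertools as it, FloatProgress, display) are in place:

xyz = torus_dat(2, 3, refine=50, segm=20)
print(xyz.shape)  # expected (2*50 + 1, 20 + 1, 3)
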
class IPyBackend(ProgressBar):
	def __init__(self, iterable=None, length=None, *, label=None,
			show_eta=True, show_percent=None, show_pos=False,
			item_show_func=None, info_sep=' '):
		from IPython import get_ipython
		
		try:
			from ipywidgets import FloatProgress
		except ImportError:
			from IPython.html.widgets.widget_float import FloatProgress
		
		ipython = get_ipython()
		if not ipython or ipython.__class__.__name__ != 'ZMQInteractiveShell':
			raise RuntimeError('IPython notebook needs to be running')
		
		self.backend = FloatProgress(value=0, min=0, step=1)
		# max and description are set via properties
		
		super().__init__(iterable, length, label=label,
			show_eta=show_eta, show_percent=show_percent, show_pos=show_pos,
			item_show_func=item_show_func, info_sep=info_sep)
		
		self.is_hidden = False
	
	
	def __enter__(self):
		from IPython.display import display
		display(self.backend)
		return super().__enter__()
	
	def render_finish(self):
		self.backend.close()
	
	def render_progress(self):
		info_bits = []
		if self.show_pos:
			info_bits.append(self.format_pos())
		if self.show_percent or (self.show_percent is None and not self.show_pos):
			info_bits.append(self.format_pct())
		if self.show_eta and self.eta_known and not self.finished:
			info_bits.append(self.format_eta())
		if self.item_show_func is not None:
			item_info = self.item_show_func(self.current_item)
			if item_info is not None:
				info_bits.append(item_info)
		
		self.backend.description = '{} {}'.format(self.label or '', self.info_sep.join(info_bits))
		self.backend.max = self.length
		self.backend.value = self.pos
Example #34
def labeled_progress(it, n, labels, fillvalue="...", final=""):
    "Iterator and set of labels.  Reports progress with bar and label."

    detail = HTML(value='<i>initializing</i>', disabled=True)
    f = FloatProgress(min=0, max=n)

    display(HBox([f, detail]))

    for x, label in zip_longest(it, labels, fillvalue=fillvalue):
        detail.value = label
        yield x
        f.value += 1
        f.description = f'{int(100*f.value/n)}%'

    detail.value = final
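
Illustrative usage of labeled_progress: each step is paired with a label, extra steps fall back to fillvalue, and the detail text is replaced by final when done (assumes the HBox/HTML/FloatProgress/zip_longest imports used above are in scope):

import time

stages = ['load', 'clean', 'fit', 'plot']
for _ in labeled_progress(range(4), 4, stages, final='done'):
    time.sleep(0.2)  # stand-in for the work of each stage
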
Example #35
def get_airtemperature_from_files():
    # Read all *.txt data files in the ./data directory

    files = glob('./data/*.txt')
    files.sort()
    progressBar = FloatProgress(min=0, max=len(files))
    display(progressBar)
    progressBar.value = 0
    air_temperature = []
    for filename in files:
        progressBar.value = progressBar.value + 1
        print('reading...', filename)
        air_temperature = air_temperature + read_data_column(filename)

    return air_temperature
def gradients(self, df, batch_size, sess):
    X = get_feature_X(df,maxlen)
    Y = pd.get_dummies(df.is_duplicate)
    sess = self.sess
    start_index = 0
    final_loss = 0
    current_total_trained = 0
    p_bar = FloatProgress(min=0, max=1.0)  # values assigned below are fractions of the dataset, so cap the bar at 1.0
    display(p_bar)
    start_time = time.time()
    while start_index < X[0].shape[0]:
        temp_x1 = X[0][start_index:start_index+batch_size]
        temp_x2 = X[1][start_index:start_index+batch_size]
        temp_seq_len1 = X[2][start_index:start_index+batch_size]
        temp_seq_len2 = X[3][start_index:start_index+batch_size]
        test_y = Y[start_index:start_index+batch_size]

        feed_dict = {
            self.min_mask1: get_init_min_mask_value(temp_seq_len1),
            self.min_mask2: get_init_min_mask_value(temp_seq_len2),
            self.seq_length1: temp_seq_len1,
            self.seq_length2: temp_seq_len2,
            self.input: temp_x1,
            self.input2: temp_x2,
            self.y: test_y
        }
        
      
        current_total_trained += temp_x1.shape[0]
        
        var_grad = tf.gradients(self.loss, [self.output])[0]
 
        # the exact output you're looking for:
        g =  sess.run([var_grad, self.concat_output], feed_dict=feed_dict)
        print("gradient %s"  % (g))
#             sys.stdout.write("\r%s/%s training loss %s"  % (start_index, X[0].shape[0], c))
#             sys.stdout.flush()
        duration = time.time() - start_time
        speed = duration/current_total_trained
        eta = (X[0].shape[0]-current_total_trained)*speed
        p_bar.value = current_total_trained/X[0].shape[0]
        p_bar.description = "%s/%s, eta %s sec"%(current_total_trained, X[0].shape[0], eta)

        start_index += batch_size
        break
        
    final_loss = final_loss/X[0].shape[0]
    return final_loss
Example #37
    def show(self, a_progress=None, ext='', p_format="{}:{}:{}%"):
        """
        Main progress-display method.
        :param ext: optional extra text to display, str, defaults to an empty string
        :param a_progress: defaults to None, i.e. the iteration count tracked internally by the class drives the display
        :param p_format: progress display format, defaults to "{}:{}:{}%", i.e. 'self._label:ext:round(self._progress / self._total * 100, 2)%'
        """
        self.progress = a_progress if a_progress is not None else self.progress + 1
        ps = round(self._progress / self._total * 100, 2)

        if self._label is not None:
            # If no label was given at initialization, only the widget progress is shown
            self.f.write('\r')
            self.f.write(p_format.format(self._label, ext, ps))

        if ABuEnv.g_is_ipython:
            if self.progress_widget is None:
                self.progress_widget = FloatProgress(value=0, min=0, max=100)
                display(self.progress_widget)
            self.progress_widget.value = ps

        # Ending this way can leave a remainder; prefer the context manager to control completion
        if self._progress == self._total:
            self.f.write('\r')
            if self.progress_widget is not None:
                self.progress_widget.close()
Example #38
    def __init__(self, a_pid):
        """Initialize the UI widgets from the process pid."""
        self.progress_widget = FloatProgress(value=0, min=0, max=100)
        self.text_widget = Text('pid={} begin work'.format(a_pid))
        # Put both widgets into a single Box container
        self.progress_box = Box([self.text_widget, self.progress_widget])
        display(self.progress_box)
Example #39
def progress_iterator(orig_iterator, description):
    """Wrap an iterator so that a progress bar is displayed

    Parameters
    ----------
    orig_iterator: iterator
        The original iterator. It must implement the __len__ operation so that
        its length can be calculated in advance.
    description: string
        Description will give a text label for the bar.
    """
    progress_widget = FloatProgress(min=0, max=len(orig_iterator)-1)
    widget = HBox([Label(description), progress_widget])
    display(widget)
    for count, val in enumerate(orig_iterator):
        yield val
        progress_widget.value = count        
Example #40
def progress_iterator(orig_iterator, **kwargs):
    """Wrap an iterator so that a progress bar is displayed

    Parameters
    ----------
    orig_iterator: iterator
        The original iterator. It must implement the __len__ operation so that
        its length can be calculated in advance.
    kwargs: additional arguments
        Any additional arguments will be passed to the float widget.
        In particular, description will give a text label for the bar.
    """
    widget = FloatProgress(min=0, max=len(orig_iterator)-1, **kwargs)
    display(widget)
    for count, val in enumerate(orig_iterator):
        yield val
        widget.value = count
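
Both progress_iterator variants above are called the same way; a minimal example for the keyword-argument version, forwarding description to the widget:

items = list(range(100))
total = 0
for value in progress_iterator(items, description='processing'):
    total += value
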
Example #41
def make_video(fname, show=True, figsize=(12,8), vmax=None, cmap='inferno', s=9, maxframes=None):
    with h5py.File('{0}.h5'.format(fname), 'r') as f:
        time = f['time'][:]
        n = time.shape[0]

        if maxframes is None or maxframes > n:
            stride = 1
            frames = n
        else:
            stride = round(n / maxframes)
            frames = n // stride

        if vmax is None:
            vmax = norm(f['step-0/vel'][:],axis=1).max()

        bar = FloatProgress(min=0, max=n-1, description='frames: {0}'.format(n))
        display(bar)

        def make_frame(step):
            coo = f['step-{0}/coo'.format(step * stride)][:]
            vel = f['step-{0}/vel'.format(step * stride)][:]
            L,H = amax(coo, axis=0)

            gcf().clear()
            dots = scatter(coo[:,0], coo[:,1], s=s, c=norm(vel,axis=1),
                    vmax=vmax, linewidth=0, cmap=cmap)

            m = L * 0.01
            xlim([-m, L+m])
            ylim([-m, H+m])
            colorbar()
            gca().set_aspect('equal')

            bar.value = step * stride
            bar.description = '{0}/{1}'.format(step * stride, n)
            return dots

        fig = figure(figsize=figsize);
        anim = animation.FuncAnimation(fig, make_frame, frames=frames, interval=30)
        anim.save('{0}.mp4'.format(fname), bitrate=3200, extra_args=['-vcodec', 'libx264'])

        close()
        bar.close()

    if show:
        return show_video(fname)
Example #42
def plot_digit_observations(digit, centroids, n_observation_classes, display_progress = False):

    pen_down_label = n_observation_classes - settings.PEN_DOWN_LABEL_DELTA
    pen_up_label = n_observation_classes - settings.PEN_UP_LABEL_DELTA
    stop_label = n_observation_classes - settings.STOP_LABEL_DELTA

    fig=plt.figure()
    ax=fig.add_subplot(111)

    f = FloatProgress(min=0, max=100)
    if display_progress:
        display(f)

    curves = []
    current_curve = []
    for observation in digit.observations:
        if observation < pen_down_label:
            point = centroids[observation]
            current_curve.append(point)
        elif observation == pen_up_label:
            if len(current_curve) > 0:
                curves.append(current_curve)
            current_curve = []

    n_points = 0
    for curve in curves:
        n_points += len(curve)

    i = 0
    for curve in curves:
        x_points = []
        y_points = []
        for point in curve:
            x_points.append(point[0])
            y_points.append(point[1])
            f.value = 100.0*(float(i) / float(n_points))
            i += 1

        plt.plot(x_points, y_points, linewidth = 2.0)
    f.close()

    plt.axis([settings.IMAGE_PLOT_X_MIN, settings.IMAGE_PLOT_X_MAX, settings.IMAGE_PLOT_Y_MIN, settings.IMAGE_PLOT_Y_MAX])
    plt.show()
Example #43
class ProgressBarJupyter(Progress):
    """Simple Jupyter progress bar

    Writes a progress bar to an ipython widget

    """
    def __init__(self, total):
        super(ProgressBarJupyter, self).__init__()
        from ipywidgets import FloatProgress
        from IPython.display import display
        self.progress = FloatProgress(min=0, max=total)
        display(self.progress)

    def update(self, step=1):
        self.progress.value += step

    def done(self):
        """Close the widget
        """
        self.progress.close()
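
A short usage sketch for the class above, assuming the Progress base class it inherits from needs no constructor arguments:

bar = ProgressBarJupyter(total=500)
for chunk in range(0, 500, 50):
    # ... process one chunk of work here ...
    bar.update(step=50)
bar.done()
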
Example #44
    def run(self, duration, obs=None):
        """Run the simulation.

        Parameters
        ----------
        duration : Real
            a duration for running a simulation.
                A simulation is expected to be stopped at t() + duration.
        obs : list of Observers, optional
            observers

        """
        from ecell4_base.core import TimeoutObserver

        timeout = TimeoutObserver(self.__timeout)
        if obs is None:
            obs = (timeout, )
        elif isinstance(obs, collections.Iterable):
            obs = tuple(obs) + (timeout, )
        else:
            obs = (obs, timeout)

        from ipywidgets import FloatProgress, HBox, HTML
        from IPython.display import display
        from time import sleep

        fp = FloatProgress(min=0, max=100)
        ptext = HTML()
        display(HBox(children=[fp, ptext]))

        tstart = self.__sim.t()
        upto = tstart + duration
        while self.__sim.t() < upto:
            self.__sim.run(upto - self.__sim.t(), obs)
            value = (self.__sim.t() - tstart) / duration
            fp.value = value * 100
            ptext.value = self.get_text(value, timeout.accumulation())
            sleep(self.__wait)

        fp.value = 100
        ptext.value = self.get_text(1, timeout.accumulation())
	def __init__(self, iterable=None, length=None, *, label=None,
			show_eta=True, show_percent=None, show_pos=False,
			item_show_func=None, info_sep=' '):
		from traitlets import TraitError
		try:
			from ipywidgets import FloatProgress
		except ImportError:
			from IPython.html.widgets.widget_float import FloatProgress
		
		try:
			self.backend = FloatProgress(value=0, min=0, step=1)
			# max and description are set via properties
		except TraitError:
			raise RuntimeError('IPython notebook needs to be running')
		
		super().__init__(iterable, length, label=label,
			show_eta=show_eta, show_percent=show_percent, show_pos=show_pos,
			item_show_func=item_show_func, info_sep=info_sep)
		
		self.is_hidden = False
	def __init__(self, iterable=None, length=None, *, label=None,
			show_eta=True, show_percent=None, show_pos=False,
			item_show_func=None, info_sep=' '):
		from IPython import get_ipython
		
		try:
			from ipywidgets import FloatProgress
		except ImportError:
			from IPython.html.widgets.widget_float import FloatProgress
		
		ipython = get_ipython()
		if not ipython or ipython.__class__.__name__ != 'ZMQInteractiveShell':
			raise RuntimeError('IPython notebook needs to be running')
		
		self.backend = FloatProgress(value=0, min=0, step=1)
		# max and description are set via properties
		
		super().__init__(iterable, length, label=label,
			show_eta=show_eta, show_percent=show_percent, show_pos=show_pos,
			item_show_func=item_show_func, info_sep=info_sep)
		
		self.is_hidden = False
Example #47
0
def periodic_nearest_neighbors_by_atom(uni, source, a, sizes, **kwargs):
    """
    Determine nearest neighbor molecules to a given source (or sources) and
    return the data as a dataframe.

    For a simple cubic periodic system with unit cell dimension ``a``,
    clusters can be generated as follows. In the example below, additional
    keyword arguments have been included as they are almost always required
    in order to correctly identify molecular units semi-empirically.

    .. code-block:: python

        periodic_nearest_neighbors_by_atom(u, [0], 40.0, [0, 5, 10, 50],
                                           dmax=40.0, C=1.6, O=1.6)

    Argument descriptions can be found below. The additional keyword arguments,
    ``dmax``, ``C``, ``O``, are passed directly to the two body computation used
    to determine (semi-empirically) molecular units. Note that although molecules
    are computed, neighboring molecular units are determined by an atom-to-atom
    criterion.

    Args:
        uni (:class:`~exatomic.core.universe.Universe`): Universe
        source (int, str, list): Integer label or string symbol of source atom
        a (float): Cubic unit cell dimension
        sizes (list): List of slices to create
        kwargs: Additional keyword arguments to be passed to atom two body calculation

    Returns:
        dct (dict): Dictionary of sliced universes and nearest neighbor table

    See Also:
        Sliced universe construction can be facilitated by
        :func:`~exatomic.algorithms.neighbors.construct`.
    """
    def sorter(group, source_atom_idxs):
        s = group[['atom0', 'atom1']].stack()
        return s[~s.isin(source_atom_idxs)].reset_index()

    if "label" not in uni.atom.columns:
        uni.atom['label'] = uni.atom.get_atom_labels()
    dct = defaultdict(list)
    grps = uni.atom.groupby("frame")
    ntot = len(grps)
    fp = FloatProgress(description="Slicing:")
    display(fp)
    for i, (fdx, atom) in enumerate(grps):
        if len(atom) > 0:
            uu = _create_super_universe(Universe(atom=atom.copy()), a)
            uu.compute_atom_two(**kwargs)
            uu.compute_molecule()
            if isinstance(source, (int, np.int32, np.int64)):
                source_atom_idxs = uu.atom[(uu.atom.index.isin([source])) &
                                           (uu.atom['prj'] == 13)].index.values
            elif isinstance(source, (list, tuple)):
                source_atom_idxs = uu.atom[uu.atom['label'].isin(source) &
                                           (uu.atom['prj'] == 13)].index.values
            else:
                source_atom_idxs = uu.atom[(uu.atom['symbol'] == source) &
                                           (uu.atom['prj'] == 13)].index.values
            source_molecule_idxs = uu.atom.loc[source_atom_idxs, 'molecule'].unique().astype(int)
            uu.atom_two['frame'] = uu.atom_two['atom0'].map(uu.atom['frame'])
            nearest_atoms = uu.atom_two[(uu.atom_two['atom0'].isin(source_atom_idxs)) |
                                        (uu.atom_two['atom1'].isin(source_atom_idxs))].sort_values("dr")[['frame', 'atom0', 'atom1']]
            nearest = nearest_atoms.groupby("frame").apply(sorter, source_atom_idxs=source_atom_idxs)
            del nearest['level_1']
            nearest.index.names = ['frame', 'idx']
            nearest.columns = ['two', 'atom']
            nearest['molecule'] = nearest['atom'].map(uu.atom['molecule'])
            nearest = nearest[~nearest['molecule'].isin(source_molecule_idxs)]
            nearest = nearest.drop_duplicates('molecule', keep='first')
            nearest.reset_index(inplace=True)
            nearest['frame'] = nearest['frame'].astype(int)
            nearest['molecule'] = nearest['molecule'].astype(int)
            dct['nearest'].append(nearest)
            for nn in sizes:
                atm = []
                for j, fdx in enumerate(nearest['frame'].unique()):
                    mdxs = nearest.loc[nearest['frame'] == fdx, 'molecule'].tolist()[:nn]
                    mdxs.append(source_molecule_idxs[j])
                    atm.append(uu.atom[uu.atom['molecule'].isin(mdxs)][['symbol', 'x', 'y', 'z', 'frame']].copy())
                dct[nn].append(pd.concat(atm, ignore_index=True))
        fp.value = i/ntot*100
    dct['nearest'] = pd.concat(dct['nearest'], ignore_index=True)
    for nn in sizes:
        dct[nn] = Universe(atom=pd.concat(dct[nn], ignore_index=True))
    fp.close()
    return dct
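
# A hedged sketch of consuming the returned dictionary, extending the docstring's
# own example above (the universe `u`, source, and sizes are the docstring's placeholders):
dct = periodic_nearest_neighbors_by_atom(u, [0], 40.0, [0, 5, 10, 50],
                                         dmax=40.0, C=1.6, O=1.6)
nearest_table = dct['nearest']   # per-frame table of nearest neighbor molecules
cluster_10 = dct[10]             # Universe with the source molecule plus its 10 nearest molecules per frame
print(nearest_table.head())
print(len(cluster_10.atom))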
Example #48
0
class AbuProgress(object):
    """单进程(主进程)进度显示控制类"""

    # Filter DeprecationWarning: Widget._keys_default is deprecated in traitlets 4.1: use @default decorator instead.
    @warnings_filter
    def __init__(self, total, a_progress, label=None):
        """
        External usage, e.g.:
            progress = AbuProgress(stock_df.shape[0], 0, 'merging {}'.format(m))
            for i, symbol in enumerate(stock_df['symbol']):
                progress.show(i + 1)
        :param total: total number of tasks
        :param a_progress: initial progress value
        :param label: label shown with the progress display
        """
        self._total = total
        self._progress = a_progress
        self._label = label
        self.f = sys.stdout
        self.progress_widget = None

    def __enter__(self):
        """创建子进程做进度显示"""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.f.write('\r')
        if self.progress_widget is not None:
            self.progress_widget.close()

    @property
    def progress(self):
        """property获取self._progress"""
        return self._progress

    @progress.setter
    def progress(self, a_progress):
        """rogress.setter设置progress"""
        if a_progress > self._total:
            self._progress = self._total
        elif a_progress < 0:
            self._progress = 0
        else:
            self._progress = a_progress

    def show(self, a_progress=None, ext='', p_format="{}:{}:{}%"):
        """
        Main method for progress display control.
        :param ext: extra text to display, str, defaults to an empty string
        :param a_progress: defaults to None, i.e. use the iteration count accumulated inside the class for the display
        :param p_format: progress display format, defaults to '{}:{}:{}%', i.e. 'self._label:ext:round(self._progress / self._total * 100, 2)%'
        """
        self.progress = a_progress if a_progress is not None else self.progress + 1
        ps = round(self._progress / self._total * 100, 2)

        if self._label is not None:
            # If no label was given at init, only the UI progress is shown
            self.f.write('\r')
            self.f.write(p_format.format(self._label, ext, ps))

        if ABuEnv.g_is_ipython:
            if self.progress_widget is None:
                self.progress_widget = FloatProgress(value=0, min=0, max=100)
                display(self.progress_widget)
            self.progress_widget.value = ps

        # Finishing this way can leave a remainder at the end; prefer the context manager to control completion
        if self._progress == self._total:
            self.f.write('\r')
            if self.progress_widget is not None:
                self.progress_widget.close()
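
# A minimal sketch of the context-manager usage recommended in the comment above
# (not part of the original class; the task count and label are hypothetical):
with AbuProgress(500, 0, label='processing') as progress:
    for i in range(500):
        # ... do one unit of work here ...
        progress.show(i + 1)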
Example #49
0
def float_progress(min_, max_):
    prog = FloatProgress(min=min_, max=max_)
    display(prog)
    for i in linspace(min_, max_, 100):
        time.sleep(0.1)
        prog.value = i
Example #50
0
# In[8]:

number_of_samples = len(mutation_df)
number_of_pathways = len(pathways) 
sample_pathway_df = pd.DataFrame(np.zeros((number_of_samples, number_of_pathways), dtype=int),
                                 index=mutation_df.index,
                                 columns=pathways)


# Now populate this data frame. This is a slow Python loop, hence the progress bar; it takes a few minutes on my laptop. The idea is to loop over all gene-pathway interactions in the hetnet query. If the gene is in the Cognoma dataset, we grab the pathway id from that gene-pathway interaction. We then look at the Cognoma samples where that gene is labeled 1, i.e., samples that have a mutation in that gene, and grab the corresponding indices. Finally, in the pathway matrix, each of those samples gets the associated pathway tagged as a 1, since it has a mutated gene that participates in that pathway.

# In[9]:

i = 0
progress_bar = FloatProgress(min=0, max=len(hetnet_results))
display(progress_bar)
for _, row in hetnet_results.iterrows():
    gene_id = row['gene_id']
    if gene_id in genes_in_both:
        pathway_id = row['pathway_id']
        affected_samples = mutation_df.loc[:, str(gene_id)] == 1
        sample_pathway_df.loc[affected_samples, pathway_id] = 1
    i += 1
    progress_bar.value = i
sample_pathway_df.head()


# Finally, we write to disk. The raw file is about 26MB, so we use bz2 compression. The file is no longer tracked due to `data/.gitignore`.

# In[10]:
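
# The original cell body is not included here; a hedged sketch of the write step
# described above (the output path is hypothetical; bz2 compression is requested explicitly):
sample_pathway_df.to_csv('data/sample_pathway_matrix.tsv.bz2', sep='\t', compression='bz2')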
Example #51
0
    def __init__(self, total):
        super(ProgressBarJupyter, self).__init__()
        from ipywidgets import FloatProgress
        from IPython.display import display
        self.progress = FloatProgress(min=0, max=total)
        display(self.progress)