def cepstral_analysis(self,
                          aic_type='aic',
                          Kmin_corrfactor=1.0,
                          bayes_p=False,
                          density_grid=None):
        """Perform the Cepstral Analysis on all blocks."""

        if self.GUI:
            progbar = FloatProgress(min=0, max=100)
            progbar.description = "0 %"
            display(progbar)

        self.BLOCK_NFREQS = self.BLOCK_SIZE // 2 + 1
        if self.MULTI_COMPONENT:
            print(' N_COMPONENTS = {:10d}'.format(self.N_COMPONENTS))
            self.ck_THEORY_var, self.psd_THEORY_mean = multicomp_cepstral_parameters(
                self.BLOCK_NFREQS, self.N_COMPONENTS)
        self.bayes_p = bayes_p

        if (self.N_BLOCKS == 1):
            raise NotImplementedError('One block.')

        for L in range(self.N_BLOCKS):
            if self.MULTI_COMPONENT:
                self.block[L].compute_psd(DT=self.TSKIP,
                                          DT_FS=self.DT_FS,
                                          average_components=True)
                self.block[L].dct = ta.CosFilter(self.block[L].logpsd,
                                                 ck_theory_var=self.ck_THEORY_var,
                                                 psd_theory_mean=self.psd_THEORY_mean,
                                                 aic_type=aic_type,
                                                 Kmin_corrfactor=Kmin_corrfactor,
                                                 normalization=self.BLOCK_SIZE)
            else:
                self.block[L].compute_psd(DT=self.TSKIP, DT_FS=self.DT_FS)
                self.block[L].dct = ta.CosFilter(
                    self.block[L].logpsd,
                    aic_type=aic_type,
                    Kmin_corrfactor=Kmin_corrfactor,
                    normalization=self.BLOCK_SIZE)  # theory_var=None

            self.block[L].dct.scan_filter_tau()
            if self.bayes_p:
                self.block[L].dct.compute_p_aic(method='ba')
                if density_grid is not None:
                    self.density_grid = density_grid
                    self.block[L].dct.compute_logtau_density(
                        method='ba',
                        only_stats=False,
                        density_grid=density_grid)
                else:
                    self.block[L].dct.compute_logtau_density(method='ba',
                                                             only_stats=True)
            if self.GUI:
                progbar.value = float(L + 1) / self.N_BLOCKS * 100.
                progbar.description = "%5.2f %%" % progbar.value

        if self.GUI:
            progbar.close()

        self.freqs = self.block[0].freqs
        return
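
A hypothetical usage sketch for this method, assuming an analysis object (here called hca) whose blocks have already been prepared; the object name is illustrative, not from the original code:

# hypothetical: hca is an instance of the block-analysis class that owns cepstral_analysis
hca.cepstral_analysis(aic_type='aic', Kmin_corrfactor=1.0, bayes_p=False)
print(hca.freqs[:5])   # frequency grid taken from the first block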
   def read_datalines(self, NSTEPS=0, start_step=-1, select_ckeys=None, max_vector_dim=None, even_NSTEPS=True):
      """Read NSTEPS steps of file, starting from start_step, and store only
      the selected ckeys.
      INPUT:
        NSTEPS         -> number of steps to read (default: 0 -> reads all the file)
        start_step  = -1 -> continue from current step (default)
                       0 -> go to start step
                       N -> go to N-th step
        select_ckeys   -> an array with the column keys you want to read (see all_ckeys for a list)
        max_vector_dim -> when reading vectors read only this number of components (None = read all components)
        even_NSTEPS    -> round the number of steps to an even number (default: True)
      OUTPUT:
        data    ->  a dictionary with the selected-column steps
      """
      if self._GUI:
         progbar = FloatProgress(min=0, max=100)
         display(progbar)
      start_time = time()
      if (NSTEPS == 0):
         NSTEPS = self.MAX_NSTEPS
      self._set_ckey(select_ckeys, max_vector_dim)  # set the ckeys to read
      self._initialize_dic(NSTEPS)  # allocate dictionary
      self.gotostep(start_step)    # jump to the starting step
      
      # read NSTEPS of the file
      progbar_step = max(100000, int(0.005*NSTEPS))
      for step in range(NSTEPS):
         line = self.file.readline()
         if len(line) == 0:  # EOF
            print "Warning:  reached EOF."
            break
         values = np.array(line.split())
         for key, idx in self.ckey.iteritems():  # save the selected columns
            self.data[key][step,:] = np.array(map(float, values[idx]))
         if ( (step+1)%progbar_step == 0 ):
            if self._GUI:
               progbar.value = float(step+1)/NSTEPS*100.;
               progbar.description = "{:6.2f}%".format(progbar.value)
            else:
               print "    step = {:9d} - {:6.2f}% completed".format(step+1, float(step+1)/NSTEPS*100.)

      if self._GUI:
         progbar.close()
      # check number of steps read, keep an even number of steps
      if (step + 1 < NSTEPS):
         if (step == 0):
            print("WARNING:  no step read.")
            return
         else:
            print("Warning:  fewer steps read.")
            NSTEPS = step + 1
      if even_NSTEPS:
         if (NSTEPS%2 == 1):
            NSTEPS = NSTEPS - 1
      for key, idx in self.ckey.items():  # free unused memory
         self.data[key] = self.data[key][:NSTEPS, :]
      print("  ( %d ) steps read." % (NSTEPS))
      self.NSTEPS = NSTEPS
      print("DONE.  Elapsed time: ", time() - start_time, "seconds")
      return self.data
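
A hypothetical usage sketch for read_datalines, assuming a column-file reader class; the class name TableFile, the file name, and the column keys are illustrative, not from the original code:

# hypothetical: TableFile is a stand-in for the class that owns read_datalines
jfile = TableFile('heat_current.dat')               # parses the header, fills all_ckeys
data = jfile.read_datalines(NSTEPS=0,               # 0 -> read the whole file
                            start_step=0,           # start from the first step
                            select_ckeys=['Temp', 'flux'])   # assumed column keys
print(data['flux'].shape)                           # (NSTEPS, n_components)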
def evaluate(self, df, is_training, batch_size, sess, dropout_prob=0.2):
    X = get_feature_X(df,maxlen)
    Y = pd.get_dummies(df.is_duplicate)
    sess = self.sess  # note: the sess argument is ignored in favor of self.sess
    start_index = 0
    final_loss = 0
    final_acc = 0
    current_total_trained = 0
    p_bar = FloatProgress(min=0, max=1)  # progress expressed as a fraction
    display(p_bar)
    start_time = time.time()
    while start_index < X[0].shape[0]:
        temp_x1 = X[0][start_index:start_index+batch_size]
        temp_x2 = X[1][start_index:start_index+batch_size]
        temp_seq_len1 = X[2][start_index:start_index+batch_size]
        temp_seq_len2 = X[3][start_index:start_index+batch_size]
        test_y = Y[start_index:start_index+batch_size]

        feed_dict = {
            self.min_mask1: get_init_min_mask_value(temp_seq_len1),
            self.min_mask2: get_init_min_mask_value(temp_seq_len2),
            self.seq_length1: temp_seq_len1,
            self.seq_length2: temp_seq_len2,
            self.input: temp_x1,
            self.input2: temp_x2,
            self.y: test_y
        }
        
        if is_training:
            feed_dict[self.prob] = 1 - dropout_prob
        
        current_total_trained += temp_x1.shape[0]

        if is_training:
            _, c, ac = sess.run([self.optimizer, self.loss, self.acc], feed_dict=feed_dict)
            final_loss += c * temp_x1.shape[0]
            final_acc += ac * temp_x1.shape[0]
            duration = time.time() - start_time
            speed = duration / current_total_trained
            eta = (X[0].shape[0] - current_total_trained) * speed
            p_bar.value = current_total_trained / X[0].shape[0]
            p_bar.description = "%s/%s, eta %.0f sec" % (current_total_trained, X[0].shape[0], eta)
        else:
            c, ac, pred, real = sess.run([self.loss, self.acc, self.output, self.y], feed_dict=feed_dict)
            final_loss += c * temp_x1.shape[0]
            final_acc += ac * temp_x1.shape[0]
            # number of correct predictions in this batch
            print(sum(np.argmax(real, axis=1) == np.argmax(pred, axis=1)))
        start_index += batch_size
        
    final_loss = final_loss / X[0].shape[0]
    final_acc = final_acc / X[0].shape[0]
    return final_loss, final_acc
Example #4
def iter_progress(it, n):

    f = FloatProgress(min=0, max=n)
    display(f)

    for x in it:
        yield x
        f.value += 1
        f.description = f'{int(100*f.value/n)}%'
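
A minimal usage sketch for iter_progress in a notebook, assuming the ipywidgets imports used by the function (FloatProgress, display) are in scope; the sleep call stands in for real per-item work:

from time import sleep

for x in iter_progress(range(50), 50):
    sleep(0.05)   # placeholder for real work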
    def read_timesteps(self, selection, start_step=-1, select_ckeys=None, fast_check=True):
        """
        Read selected keys of file, within the provided range.
        Examples:
            read_timesteps(10, start_step=0, select_ckeys=['id','xu','yu','vu'])  -->>  Read the first 10 timesteps, only the specified columns
            read_timesteps(10, select_ckeys=['id','xu','yu','vu'])  -->>  Read the next 10 timesteps, only the specified columns (DELTA_TIMESTEP is assumed)
            read_timesteps((10,30))      -->>  Read from TIMESTEP 10 to 30
            read_timesteps((10,30,2))    -->>  Read every 2 steps from TIMESTEP 10 to 30
        """
        if self._GUI:
            progbar = FloatProgress(min=0, max=100)
            display(progbar)
        start_time = time()
        self._set_ckey(select_ckeys)   # set the ckeys to read      --> ckey
        self._set_timesteps(selection, start_step)   # set the timesteps to read  --> timestep
        self._initialize_dic()   # allocate dictionary        --> data

        # extract the steps from the file
        progbar_step = max(1000, int(0.005 * self.nsteps))
        atomid_col = self.all_ckeys['id'][0]
        for istep, step in enumerate(self.timestep):
            self._gototimestep(step, fast_check)   # jump to the desired step,
            self.data[istep]['TIMESTEP'] = step
            for nat in range(self.NATOMS):   # read data (may be unsorted)
                line = self.file.readline()
                if len(line) == 0:   # EOF
                    raise EOFError('Warning:  reached EOF.')
                values = np.array(line.split())
                for key, idx in self.ckey.items():   # save the selected columns
                    atomid = int(values[atomid_col]) - 1   # current atom index (in LAMMPS it starts from 1)
                    if (key == 'element'):   # this should be improved
                        self.data[istep][key][atomid, :] = np.array(list(map(str, values[idx])))
                    else:
                        self.data[istep][key][atomid, :] = np.array(list(map(float, values[idx])))
            if ((istep + 1) % progbar_step == 0):
                if self._GUI:
                    progbar.value = float(istep + 1) / self.nsteps * 100.
                    progbar.description = '%g %%' % progbar.value
                else:
                    log.write_log('    step = {:9d} - {:6.2f}% completed'.format(istep + 1,
                                                                                 float(istep + 1) / self.nsteps * 100.))
        if self._GUI:
            progbar.close()
        # check number of steps read, keep an even number of steps
        if (istep + 1 < self.nsteps):   # (should never happen)
            if (istep == 0):
                log.write_log('WARNING:  no step read.')
                return
            else:
                log.write_log('Warning:  fewer steps read.')
                self.nsteps = istep + 1
        if not self._quiet:
            log.write_log('  ( %d ) steps read.' % (self.nsteps))
            log.write_log('DONE.  Elapsed time: ', time() - start_time, 'seconds')
        self._compute_current_step = False   # next time do not compute the current_step
        return self.data
Example #6
    def cepstral_analysis_kappa(self, other, aic_type='aic', Kmin_corrfactor=1.0, bayes_p=False, density_grid=None):   # also needs "other", a class holding the charge current!
        """Perform the Cepstral Analysis on all blocks."""

        if self.GUI:
            progbar = FloatProgress(min=0, max=100)
            progbar.description = "0 %"
            display(progbar)

        self.BLOCK_NFREQS = self.BLOCK_SIZE // 2 + 1
        if self.MULTI_COMPONENT:
            print(' N_COMPONENTS = {:10d}'.format(self.N_COMPONENTS))
            self.ck_THEORY_var, self.psd_THEORY_mean = tc.md.cepstral.multicomp_cepstral_parameters(
                self.BLOCK_NFREQS, self.N_COMPONENTS - 1)   # different number of degrees of freedom!
        self.bayes_p = bayes_p
        
        if (self.N_BLOCKS == 1):
            raise NotImplementedError('One block.')
        
        for L in range(self.N_BLOCKS):
            if self.MULTI_COMPONENT:
                self.block[L].compute_kappa(other=other.block[L], DT=self.TSKIP, DT_FS=self.DT_FS, average_components=True)   # different method call!
                self.block[L].dct = tc.md.CosFilter(self.block[L].logpsd,
                                                    ck_theory_var=self.ck_THEORY_var,
                                                    psd_theory_mean=self.psd_THEORY_mean,
                                                    aic_type=aic_type,
                                                    Kmin_corrfactor=Kmin_corrfactor)
                # normalization=self.BLOCK_SIZE removed (personal communication with Loris)
            else:
                self.block[L].compute_kappa(other=other.block[L], DT=self.TSKIP, DT_FS=self.DT_FS)   # different method call!
                self.block[L].dct = tc.md.CosFilter(self.block[L].logpsd, aic_type=aic_type,
                                                    Kmin_corrfactor=Kmin_corrfactor)   # theory_var=None

            self.block[L].dct.scan_filter_tau()
            if self.bayes_p:
                self.block[L].dct.compute_p_aic(method='ba')
                if density_grid is not None:
                    self.density_grid = density_grid
                    self.block[L].dct.compute_logtau_density(method='ba', only_stats=False, density_grid=density_grid)
                else:
                    self.block[L].dct.compute_logtau_density(method='ba', only_stats=True)
            if self.GUI:
                progbar.value = float(L + 1) / self.N_BLOCKS * 100.
                progbar.description = "%5.2f %%" % progbar.value
        
        if self.GUI:
            progbar.close()

        self.freqs = self.block[0].freqs
        return
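
A hypothetical usage sketch, assuming two matching block-analysis instances, one holding the energy current and one the charge current; the object names are illustrative:

# hypothetical: energy and charge hold the energy- and charge-current blocks
energy.cepstral_analysis_kappa(other=charge, aic_type='aic', Kmin_corrfactor=1.0)
print(energy.freqs[:5])   # frequency grid taken from the first block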
Example #7
def labeled_progress(it, n, labels, fillvalue="...", final=""):
    "Iterator and set of labels.  Reports progress with bar and label."

    detail = HTML(value='<i>initializing</i>', disabled=True)
    f = FloatProgress(min=0, max=n)

    display(HBox([f, detail]))

    for x, label in zip_longest(it, labels, fillvalue=fillvalue):
        detail.value = label
        yield x
        f.value += 1
        f.description = f'{int(100*f.value/n)}%'

    detail.value = final
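
A usage sketch for labeled_progress in a notebook, assuming the imports the function relies on (itertools.zip_longest, ipywidgets FloatProgress/HTML/HBox, display) are in scope; labels shorter than the iterable fall back to the fillvalue:

from time import sleep

for x in labeled_progress(range(5), 5, ['load', 'clean', 'fit'], final='done'):
    sleep(0.1)   # placeholder for real work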
def gradients(self, df, batch_size, sess):
    X = get_feature_X(df,maxlen)
    Y = pd.get_dummies(df.is_duplicate)
    sess = self.sess
    start_index = 0
    final_loss = 0
    current_total_trained = 0
    p_bar = FloatProgress(min=0, max=1)  # progress expressed as a fraction
    display(p_bar)
    start_time = time.time()
    while start_index < X[0].shape[0]:
        temp_x1 = X[0][start_index:start_index+batch_size]
        temp_x2 = X[1][start_index:start_index+batch_size]
        temp_seq_len1 = X[2][start_index:start_index+batch_size]
        temp_seq_len2 = X[3][start_index:start_index+batch_size]
        test_y = Y[start_index:start_index+batch_size]

        feed_dict = {
            self.min_mask1: get_init_min_mask_value(temp_seq_len1),
            self.min_mask2: get_init_min_mask_value(temp_seq_len2),
            self.seq_length1: temp_seq_len1,
            self.seq_length2: temp_seq_len2,
            self.input: temp_x1,
            self.input2: temp_x2,
            self.y: test_y
        }
        
      
        current_total_trained += temp_x1.shape[0]

        # note: building the gradient op inside the loop is wasteful; ideally
        # tf.gradients would be called once, outside the loop
        var_grad = tf.gradients(self.loss, [self.output])[0]

        g = sess.run([var_grad, self.concat_output], feed_dict=feed_dict)
        print("gradient %s" % (g))
        duration = time.time() - start_time
        speed = duration / current_total_trained
        eta = (X[0].shape[0] - current_total_trained) * speed
        p_bar.value = current_total_trained / X[0].shape[0]
        p_bar.description = "%s/%s, eta %.0f sec" % (current_total_trained, X[0].shape[0], eta)

        start_index += batch_size
        break   # only the first batch is inspected
        
    final_loss = final_loss / X[0].shape[0]
    return final_loss
   def read_datalines(filedata, ckey, even_NSTEPS=True, GUI=False):
      if GUI:
         progbar = FloatProgress(min=0, max=100)
         display(progbar)
      NSTEPS = len(filedata) - 1
      data = initialize_dic(NSTEPS, ckey)

      progbar_step = max(100000, int(0.005*NSTEPS))
      for step, line in enumerate(filedata[1:]):
         if len(line) == 0:  # EOF
            print("Warning:  reached EOF.")
            break
         values = np.array(line.split())
         for key, idx in ckey.items():  # save the selected columns
            data[key][step, :] = np.array(list(map(float, values[idx])))
         if ((step + 1) % progbar_step == 0):
            if GUI:
               progbar.value = float(step + 1) / NSTEPS * 100.
               progbar.description = "{:6.2f}%".format(progbar.value)
            else:
               print("    step = {:9d} - {:6.2f}% completed".format(step + 1, float(step + 1) / NSTEPS * 100.))
      if GUI:
         progbar.close()
      # check number of steps read, keep an even number of steps
      if (step + 1 < NSTEPS):
         if (step == 0):
            print "WARNING:  no step read."
            return
         else:
            print "Warning:  less steps read."
            NSTEPS = step + 1
      if even_NSTEPS:
         if (NSTEPS%2 == 1):
            NSTEPS = NSTEPS - 1
      for key, idx in ckey.items():  # free unused memory
         data[key] = data[key][:NSTEPS, :]
      print("  ( %d ) steps read." % (NSTEPS))
      return data
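
A hypothetical usage sketch for this standalone read_datalines, assuming a whitespace-separated file with a header line; the file name, column layout, and the initialize_dic helper used above are assumptions:

# hypothetical input file: a header line followed by two numeric columns
with open('current.dat') as f:
    filedata = f.read().splitlines()

ckey = {'Temp': [0], 'flux': [1]}   # column key -> column indices (assumed layout)
data = read_datalines(filedata, ckey, even_NSTEPS=True, GUI=False)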
Example #10
    def read_datalines(self,
                       NSTEPS=0,
                       start_step=-1,
                       select_ckeys=None,
                       max_vector_dim=None,
                       even_NSTEPS=True):
        """
        Read NSTEPS steps of file, starting from start_step, and store only the selected ckeys.

        INPUT:
          NSTEPS         -> number of steps to read (default: 0 -> read the whole file)
          start_step  = -1 -> continue from current step (default)
                         0 -> go to start step
                         N -> go to N-th step
          select_ckeys   -> an array with the column keys you want to read (see all_ckeys for a list)
          max_vector_dim -> when reading vectors read only this number of components (None = read all components)
          even_NSTEPS    -> round the number of steps to an even number (default: True)

        OUTPUT:
          data    ->  a dictionary with the selected columns for each step
        """
        if self._GUI:
            progbar = FloatProgress(min=0, max=100)
            display(progbar)
        start_time = time()
        if (NSTEPS == 0):
            NSTEPS = self.MAX_NSTEPS
        self._set_ckey(select_ckeys, max_vector_dim)  # set the ckeys to read
        self._initialize_dic(NSTEPS)  # allocate dictionary
        self.gotostep(start_step)  # jump to the starting step

        # read NSTEPS of the file
        progbar_step = max(100000, int(0.005 * NSTEPS))
        for step in range(NSTEPS):
            line = self.file.readline()
            if (len(line) == 0):  # EOF
                log.write_log('Warning:  reached EOF.')
                break
            if self.endrun_keyword in line:  # end-of-run keyword
                log.write_log('  endrun_keyword found.')
                step -= 1
                break
            values = np.array(line.split())
            if (values.size != self.NALLCKEYS):
                log.write_log(
                    'Warning:  line with wrong number of columns found. Stopping here...'
                )
                log.write_log(line)
                break
            for key, idx in self.ckey.items():  # save the selected columns
                self.data[key][step, :] = np.array(
                    list(map(float, values[idx])))
            if ((step + 1) % progbar_step == 0):
                if self._GUI:
                    progbar.value = float(step + 1) / NSTEPS * 100.
                    progbar.description = '{:6.2f}%'.format(progbar.value)
                else:
                    log.write_log(
                        '    step = {:9d} - {:6.2f}% completed'.format(
                            step + 1,
                            float(step + 1) / NSTEPS * 100.))

        if self._GUI:
            progbar.close()
        # check number of steps read, keep an even number of steps
        if (step + 1 < NSTEPS):
            if (step == 0):
                log.write_log('WARNING:  no step read.')
                return
            else:
                if (NSTEPS != self.MAX_NSTEPS):  # if NSTEPS was specified
                    log.write_log('Warning:  fewer steps read.')
                NSTEPS = step + 1  # the correct number of read steps
        # even the number of steps
        if even_NSTEPS:
            if (NSTEPS % 2 == 1):
                NSTEPS = NSTEPS - 1
                log.write_log(
                    '  Retaining an even number of steps (even_NSTEPS=True).')
        for key, idx in self.ckey.items():  # free the memory not used
            self.data[key] = self.data[key][:NSTEPS, :]
        log.write_log('  ( %d ) steps read.' % (NSTEPS))
        self.NSTEPS = NSTEPS
        log.write_log('DONE.  Elapsed time: ', time() - start_time, 'seconds')
        return self.data
Example #11
adam_params = {"lr": 0.1}
#adam_params = {"lr": 0.005, "betas": (0.9, 0.999)}
optimizer = pyro.optim.Adam(adam_params)
#optimizer = pyro.optim.SGD(adam_params)

svi = SVI(model, guide, optimizer, loss=Trace_ELBO())

# CHANGE: use a nice progress bar
n_steps = 400
pro = FloatProgress(min=0, max=n_steps)
display(pro)

for step in range(n_steps):
    svi.step(data)
    pro.value += 1
    pro.description = str(step)

# CHANGE: change only at the end
np.save(file="qalpha0", arr=pyro.param("qalpha0").detach().numpy())
np.save(file="qalpha1", arr=pyro.param("qalpha1").detach().numpy())

plt.imshow(data_vis[:, 80:180])

# ADD: quick plot before exhaustive plot
loaded = np.load("qalpha1.npy")
plt.imshow(-loaded.reshape(-1, ntr), cmap="gray")
plt.show()

for i in range(nz):
    #plt.imshow(loaded[i].squeeze())
    print(loaded[i].sum())
Example #12
def benchmark(param, param_range=[0, 1, 0.1], learn=[], learn_i=[], test=[], test_i=[], learn_v="auto", test_v="auto", options={}, folder_learn="src/learning/", folder_test="src/tests/", neurons=(100,), curve="interpolate"):
    """
    Effectue un banc de tests avec des paramètres donnés en évaluant la précision du réseau de neurones artificiels.

    :param param: Nom du paramètre à changer (doit être le nom de la variable tel que défini dans la fonction compare)
    :type param: string (paramètre accepté par la fonction compare)
    :param param_range: Tableau tridimensionnel contenant la valeur de début, de fin et le pas
    :type param_range: number[start, end, step]
    :param learn: Liste de noms formattable des échantillons d'apprentissage
    :type learn: string[]
    :param test: Liste de noms formattable des échantillons de test
    :type test: string[]
    :param learn_i: Liste de range (de 1 à n) pour la génération des fichiers du paramètre learn
    :type learn_i: number[]
    :param test_i: Liste de range (de 1 à n) pour la génération des fichiers du paramètre test
    :type test_i: number[]
    :param learn_v: Liste des valeurs des échantillons d'apprentissage (déterminé selon le nom du fichier si non précisé)
    :type learn_v: string[]
    :param test_v: Liste des valeurs des échantillons de test (déterminé selon le nom du fichier si non précisé)
    :type test_v: string[]
    :param folder_learn: Dossier contenant les échantillons d'apprentissage
    :type folder_learn: string
    :param folder_test: Dossier contenant les échantillons de tests
    :type folder_test: string
    :param options: Options de comparaison (voir la documentation de compare, certains paramètres sont requis)
    :type options: object
    :param neurons: Nombre de couches et de neuronnes
    :type neurons: (number, ...)
    """
    # Initialization
    x = []
    y = []
    # Progress bar
    progress = FloatProgress(min=param_range[0], max=param_range[1] + param_range[2], description="Waiting...")
    display(progress)
    # Retrieve the file names to reduce computation time
    #_learn = learning_files(learn, learn_i)
    #_test = learning_files(test, test_i)
    # Vary the parameter
    for value in np.arange(param_range[0], param_range[1]+param_range[2], param_range[2]):
        # Computation in progress
        progress.value = value
        progress.description = "{p} : {v}".format(p=param, v=value)
        options[param] = value
        x.append(value)
        y.append(learning(
            learn=learn, learn_i=learn_i, test=test, test_i=test_i, learn_v=learn_v, test_v=test_v,
            debug=False, benchmark_only=True, progress=[progress, param_range[2]],
            options=options, folder_learn=folder_learn, folder_test=folder_test, neurons=neurons
        ))
    # Update the progress bar
    progress.value = progress.max
    progress.description = "Terminé !"
    progress.bar_style = "success"
    # New figure
    plt.figure(figsize=(8, 8), dpi=80, facecolor="w", edgecolor="k")
    ax = plt.subplot(111)
    if (curve == "interpolate") and (len(x) >= 3):
        xi = np.linspace(x[0], x[-1], num=len(x)*10)
        ax.plot(xi, interp1d(x, y, kind="cubic")(xi))
    else:
        ax.plot(x, y)
    ax.set_xlabel("Variation du paramètre {x}".format(x=param))
    ax.set_xlim(param_range[0], param_range[1])
    ax.set_ylabel("Précision")
    ax.set_ylim(0, 1)
    plt.show()

    return x, y
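
A hypothetical call sketch for benchmark; the parameter name, file patterns, and options below are illustrative, not taken from the original code:

# hypothetical file patterns and option name, for illustration only
x, y = benchmark('threshold', param_range=[0.0, 1.0, 0.1],
                 learn=['sample_{i}.wav'], learn_i=[10],
                 test=['test_{i}.wav'], test_i=[5],
                 options={'fs': 44100}, neurons=(100,))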
Example #13
    def worker(self):
        def cancel(b):
            self.sc.cancelJobGroup(self.job_info.group_id)

        def toggle(widget):
            def f(b):
                for w in widget.children:
                    h = w.layout.height
                    if h is None or h == "16px":
                        w.layout.height = "0px"
                        b.icon = "arrow-circle-down"
                    else:
                        w.layout.height = "16px"
                        b.icon = "arrow-circle-right"

            return f

        style = {"description_width": "initial"}
        bars = {}
        labels = {}
        lastJob = None

        progressbars = VBox([])

        cancel_button = Button(button_style="",
                               tooltip="Cancel Spark Job",
                               icon="window-close")
        cancel_button.add_class("db-button")
        cancel_button.on_click(cancel)

        toggle_button = Button(button_style="",
                               tooltip="Toggle progress bar",
                               icon="arrow-circle-right")
        toggle_button.add_class("db-button")
        toggle_button.on_click(toggle(progressbars))

        indicator = HBox([toggle_button, progressbars])

        while self.running == 1:
            time.sleep(0.2)
            jobs = [(jobid, self.tracker.getJobInfo(jobid)) for jobid in
                    self.tracker.getJobIdsForGroup(self.job_info.group_id)
                    if self.tracker.getJobInfo(jobid).status == "RUNNING"]

            for j, job in jobs:
                if bars.get(j, None) is None:
                    if lastJob is not None:
                        bars[lastJob].value = 100.0
                    bars[j] = FloatProgress(
                        value=0.0,
                        min=0.0,
                        max=100.0,
                        description="Job: %04d Stage: %04d" % (j, 0),
                        bar_style="info",
                        orientation="horizontal",
                        style=style,
                    )
                    bars[j].add_class("db-bar")
                    labels[j] = Label(
                        value="",
                        description="Code:",
                        disabled=False,
                        layout=Layout(width="800px",
                                      height="100%",
                                      margin="0 0 0 5px"),
                    )
                    labels[j].add_class("db-label")

                    progressbar = HBox([bars[j], labels[j]])
                    progressbars.children = progressbars.children + (
                        progressbar, )
                    if not self.progressbar_showing:
                        self.progressbar_showing = True
                        display(indicator)

                lastJob = j
                stageIds = sorted(job.stageIds)
                for s in stageIds:
                    stageInfo = self.tracker.getStageInfo(s)
                    bars[j].description = "Job: %04d Stage: %04d" % (j, s)
                    labels[j].value = "code: '%s' / stages: %s" % (
                        stageInfo.name,
                        str(stageIds)[1:-1],
                    )
                    if stageInfo.numActiveTasks > 0:
                        progress = int(100 * stageInfo.numCompletedTasks /
                                       stageInfo.numTasks)
                        bars[j].value = progress

        if lastJob is not None and self.running == 0:
            bars[lastJob].value = 100.0
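
A hypothetical wiring sketch: the worker is meant to poll the Spark status tracker from a background thread while jobs run; the monitor object and its attributes are assumptions based on the code above:

import threading

# hypothetical: monitor is an instance of the class that owns worker()
monitor.running = 1
threading.Thread(target=monitor.worker, daemon=True).start()
# ... submit Spark jobs in the monitored job group ...
monitor.running = 0   # stop the polling loop when done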
Example #14
def live_record2(filters, filters_fq, time_res, amp_res, fs, last, formants, drc_tl, drc_th, drc_r, duration, adc_res, predict):
    import pyaudio
    # Size of the recorded chunks
    chunk_size = 4096

    # Progress bar
    maxi = int(np.ceil(fs / chunk_size * duration))
    progress = FloatProgress(min=0, max=maxi, description="Loading")
    display(progress)

    # Initialization
    plt.ion()
    f, ax = plt.subplots(2, 1, figsize=(24, 12), dpi=80, facecolor="w", edgecolor="k")
    f.show()

    # Main loop
    try:
        while True:
            # Open the stream
            p = pyaudio.PyAudio()
            stream = p.open(format=pyaudio.paInt16, channels=1, rate=fs, input=True, frames_per_buffer=chunk_size)

            # Record
            frames = []
            progress.value = progress.min
            progress.description = "Recording"
            progress.bar_style = ""
            for i in range(0, maxi):
                data = stream.read(chunk_size)
                frames.append(data)
                progress.value = i
            # Update the progress bar
            progress.value = progress.max
            progress.description = "Done!"
            progress.bar_style = "success"

            # Close the stream
            stream.stop_stream()
            stream.close()
            p.terminate()

            # Save the recording to disk
            wf = wave.open("src/record.wav", "wb")
            wf.setnchannels(1)
            wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
            wf.setframerate(fs)
            wf.writeframes(b''.join(frames))
            wf.close()

            # Processing
            ax[0].clear(); ax[1].clear()
            _, _, rseqs = compute(file="src/record.wav", filters=filters, filters_fq=filters_fq, time_res=time_res, amp_res=amp_res, ax=ax, dbfs=False, drc_tl=drc_tl, drc_th=drc_th, drc_r=drc_r, adc_res=adc_res)

            # Prediction
            if predict:
                f.suptitle(predict(rseqs, name=False, debug=False)[0], fontsize=30)

            # Not very pretty, but avoids a NotImplementedError?
            try:
                plt.pause(3)
            except Exception:
                continue
    # Hide the error if the kernel is interrupted
    except KeyboardInterrupt:
        # Close the stream
        stream.stop_stream()
        stream.close()
        p.terminate()
        clear_output()
        plt.close()
        print("Terminé !")
    return
Example #15

#%%

# *** MAIN ROUTINE ***

# All units in m

# Definitions file
defs_fName = "Best fit mesh - Definitions file.csv"
defs_data = npy.genfromtxt(defs_fName, dtype=None, delimiter=",", names=True)
nFiles = defs_data.shape[0]

# Initialise progress bar
pBar = FloatProgress(min=0, max=nFiles)
pBar.description = "Processing point cloud data"
display(pBar)

for f in arange(0, nFiles):

    # Update progress bar
    pBar.value += 1

    # Read details from definitions file
    fName = defs_data["Filename"][f].decode('UTF-8')
    OD = defs_data["OD"][f]  # outer diameter in m
    t = defs_data["t"][f]  # wall thickness in m
    L = defs_data["L"][f]  # segment length / spacing between ring flanges
    Le_x_target = defs_data["Le_x_target"][
        f]  # target length of elements in x direction
    Le_y_target = defs_data["Le_y_target"][