def iterate_one(self):
    """Run one evolutionary generation.

    Breeds children from the current population, evaluates the combined
    parent+child pool with ``cost_function``, and keeps the best
    ``population_size`` individuals as selected by ``base.truncate``.
    Updates ``self._X`` / ``self._Y`` in place; returns ``None``.
    """
    offspring = self._mutate_and_crossover()
    # Pool parents (copied) and offspring, then score the whole pool.
    pool = np.concatenate((self._X.copy(), offspring), axis=0)
    pool_cost = self.cost_function(pool)

    # base.truncate selects the surviving indices for the next generation.
    kept_Y, rank, cf, kept_idx = base.truncate(pool_cost, self.population_size)

    self._X = pool[kept_idx].copy()
    self._Y = kept_Y.copy()
# Esempio n. 2  (scraper separator — not code)
    def fit(self, X, cls_label, ses_label):
        """Fit the NMF model to ``X`` with per-sample class/session labels.

        Parameters
        ----------
        X : array-like
            Input data matrix (samples along axis 0).
        cls_label, ses_label : array-like
            Per-sample class and session labels.

        The data is reordered and truncated by class/session (via
        ``base``), then the model is updated for ``self.n_iter``
        iterations with optional per-iteration class/session sums
        (``dist_mode == 'iter'``), periodic scoring/printing controlled
        by ``self.verbose``, and optional W/H normalization.

        NOTE: all prints use the single-argument ``print(...)`` form,
        which behaves identically under Python 2's print statement and
        Python 3's print function.
        """
        global_tick = time.time()
        print("Reordering data...")
        data = base.reorder_cls_ses(X, cls_label, ses_label)
        X = data['data']
        cls_label = data['cls']
        ses_label = data['ses']
        self.update_iters(X, cls_label, ses_label)
        data = base.truncate(X, cls_label, ses_label, self.iters['cls_ind'])
        X = data['data']
        cls_label = data['cls']
        ses_label = data['ses']
        ind = data['ind']
        self.iters['cls_ind'] = ind
        # Shrink H to match the (possibly truncated) number of samples.
        self.H.set_value(self.H.get_value()[0:X.shape[0], ])
        if self.normalize:
            self.normalize_W_H()

        buff = self.generate_buffer_from_lbl(X, cls_label, ses_label,
                                             random=True, truncate=True)
        if self.buff_size > X.shape[0]:
            self.X_buff.set_value(X.astype(theano.config.floatX))
        self.scores.append(self.score_buffer(X, buff))
        print('Fitting NMF model with %d iterations....' % self.n_iter)
        for it in range(self.n_iter):
            if self.dist_mode == 'iter':
                # Accumulate per-class and per-session sums each iteration.
                for i in range(int(np.max(cls_label)+1)):
                    Sci = np.hstack(np.where(self.iters['cls'][:, 0] == i))
                    if Sci.shape[0] > 0:
                        self.class_sum(i, self.n_components, Sci)
                for i in range(int(np.max(ses_label)+1)):
                    # NOTE(review): sessions are looked up in column 1 of
                    # iters['cls'] — presumably rows are [class, session]
                    # pairs; confirm against update_iters.
                    Csi = np.hstack(np.where(self.iters['cls'][:, 1] == i))
                    if Csi.shape[0] > 0:
                        self.ses_sum(i, self.n_components, Csi)

            if self.verbose > 0:
                if (it+1) % self.verbose == 0:
                    if 'tick' not in locals():
                        # Timing starts at the first verbose checkpoint only;
                        # 'Duration' below therefore measures from that point.
                        tick = time.time()
                    print('\n\n NMF model, iteration {0}/{1}'.format(it+1,
                                                                     self.n_iter))
            buff = self.generate_buffer_from_lbl(X, cls_label, ses_label,
                                                 random=True, truncate=True)
            self.update_buffer(X, buff, it)
            if self.normalize:
                self.normalize_W_H()
            if self.verbose > 0:
                if (it+1) % self.verbose == 0:
                    self.scores.append(self.score_buffer(X, buff))
                    if self.NMF_updates == 'beta':
                        if self.scores[-1] > 0:
                            # BUG FIX: was `self.score[-1]` (no such
                            # attribute — AttributeError at runtime).
                            print('Score: %.1f' % self.scores[-1])
                    else:
                        if self.scores[-1][0][0] > 0:
                            print('Score: %.1f' % self.scores[-1][0][0])
                            print('Beta-divergence: %.1f' % self.scores[-1][0][1])
                            print('Class distance : %.1f (%.1f)' % (self.scores[-1][0][2]*self.lambdas[0],
                                                                    self.scores[-1][0][2]))
                            print('Session distance : %.1f (%.1f)' % (self.scores[-1][0][3]*self.lambdas[1],
                                                                      self.scores[-1][0][3]))
                            print('Duration=%.1fms' % ((time.time() - tick) * 1000))
                    sys.stdout.flush()
        print('Total duration=%.1fms' % ((time.time() - global_tick) * 1000))
  def iterate_one(self):
    """Advance the multi-objective PSO swarm by one generation.

    Steps actually performed below:
      1. decay the inertia weight: ``_w *= inertia_decay_rate``;
      2. for each particle, find its nearest neighbour in objective
         space — used as a repulsion term to keep the swarm spread out;
      3. every 5 iterations, assign each particle a randomly permuted
         "designated target" sampled evenly from the global-best archive
         (the social attractor);
      4. velocity update = inertia + cognitive pull toward the particle's
         personal best + social pull toward its designated target
         - repulsion from its nearest neighbour; velocities are then
         norm-clipped to ``maximum_velocity``;
      5. move particles, clip positions to the ``[lb, ub]`` box, and
         re-evaluate ``cost_function``;
      6. refresh the global-best archive via ``base.truncate`` over the
         union of the old archive and the new points, then update each
         personal best by Pareto dominance (or, when neither dominates,
         by distance to an interpolated point on the archive front).

    The large commented-out sections record alternative neighbour/target
    selection strategies that were experimented with; kept for reference.
    """
    s=self
    #calculating inertia weight:
    s._w=s._w*s.inertia_decay_rate
    w=s._w
    #particle movement:
    #nearest_global_best=np.zeros((s.population_size,s.n_dimensions)) #distance in fitness space
    #nearest_projected_best=np.zeros((s.population_size,s.n_dimensions)) #virtual point which ponders the nearest ones to guess the nearest one on the pareto border
    nearest_neighbour=np.zeros((s.population_size,s.n_dimensions))   #distance in fitness space
    for i in range(s.population_size):
      ##getting nearest global best
      #aux=(((s._global_bestY-s._Y[i])**2).sum(axis=1))**0.5
      #aux2=np.where(aux==np.min(aux))[0][0]
      #temp=s._global_bestX[aux2]
      #nearest_global_best[i]=s._global_bestX[aux2]

      ##getting nearest projected best:
      #aux=(((s._global_bestY-s._Y[i])**2).sum(axis=1))**0.5
      #aux2=np.argsort(aux)[:s.n_objectives] #get as many points as necessary to interpolate the pareto front nearest to the point
      #if(aux[aux2[0]]==0):
      #  nearest_projected_best[i]=s._global_bestX[aux2[0]]
      #else:
      #  nearest_projected_best[i]=(s._global_bestX[aux2]*((aux[aux2]**-1).reshape(-1,1))/np.sum(aux[aux2]**-1)).sum(axis=0)

      #getting nearest neighbour
      # Euclidean distance in objective space; self-distance masked with inf
      # so a particle is never its own neighbour.
      aux=(((s._Y-s._Y[i])**2).sum(axis=1))**0.5
      aux[i]=np.inf
      aux2=np.where(aux==np.min(aux))[0][0]
      nearest_neighbour[i]=s._X[aux2]

    #if(s._iter%5==0):
    #  o_global=base.component_sort_order(s._global_bestY)
    #  o_current=base.component_sort_order(s._Y)
    #  o_global=o_global*len(o_current)/len(o_global) #make sure the range is the same
    #  s.nearest_global_best_by_order=np.zeros((s.population_size,s.n_dimensions)) #distance in fitness space
    #  taken=np.zeros(len(o_global),'bool')
    #  for i in range(s.population_size):
    #    #getting nearest by order:
    #    aux=(((o_global-o_current[i])**2).sum(axis=1))**0.5
    #    aux2=np.argsort(aux)
    #    for j in aux2:
    #      if not taken[j]:
    #        break
    #    s.nearest_global_best_by_order[i]=s._global_bestX[j]
    #    taken[j]=True

    # Every 5th iteration: resample each particle's social attractor from
    # the archive — indices spread evenly over the archive, then shuffled
    # so assignments differ between particles.
    if(s._iter%5==0):
      aux=len(s._global_bestX)*(np.arange(s.population_size))/s.population_size
      #s.designated_target=s._global_bestX[aux.astype('int')]
      s.designated_target=s._global_bestX[aux[np.random.permutation(len(aux))].astype('int')]

    r1=np.random.random((s.population_size,s.n_dimensions))
    r2=np.random.random((s.population_size,s.n_dimensions))
    r3=np.random.random((s.population_size,s.n_dimensions))
    #r4=np.random.random((s.population_size,s.n_dimensions))-0.5
    s._V= w*s._V + \
          s.cognitive_coefficient*r1*(s._individual_bestX-s._X) +\
          s.social_coefficient*r2*(s.designated_target-s._X) +\
          - r3*s.repulsion_coefficient*(nearest_neighbour-s._X) #+\
          #r4*0.00001
          #s.social_coefficient*r2*(s.nearest_global_best_by_order-s._X) +\
          #
          #s.social_coefficient*r2*(nearest_projected_best-s._X) +\
          #- r3*s.repulsion_coefficient*(nearest_neighbour-s._X)# +\
          #s.social_coefficient*r2*(nearest_global_best-s._X) + \

    #applying speed limit:
    vnorm=((s._V**2).sum(axis=1))**0.5 #norm of the speed
    aux=np.where(vnorm>s.maximum_velocity) #particles with velocity greater than expected
    s._V[aux]=s._V[aux]*s.maximum_velocity/(vnorm[aux].reshape((-1,1))) #clipping the speed to the maximum speed

    #update solutions:
    s._X=s._X+s._V

    #clipping the search space
    s._X=np.minimum(s.ub,s._X)
    s._X=np.maximum(s.lb,s._X)

    #fitness value calculation:
    s._Y = s.cost_function(s._X)  # current particle cost

    #update memories:
    # NOTE(review): base.truncate presumably ranks the pooled points by
    # Pareto dominance / crowding and returns the surviving indices —
    # confirm in the base module.
    temp_Y=np.concatenate((s._global_bestY,s._Y),axis=0).copy()
    temp_X=np.concatenate((s._global_bestX,s._X),axis=0).copy()
    Y,rank,cf,indices=base.truncate(temp_Y,1*s.population_size)
    #s._global_bestX=temp_X[indices][np.where(rank==1)[0]]
    #s._global_bestY=temp_Y[indices][np.where(rank==1)[0]]
    s._global_bestX=temp_X[indices]
    s._global_bestY=temp_Y[indices]


    #debugging code:
    #for i in s._global_bestY:
    #  assert((temp_Y>=i).any(axis=1).all(axis=0))
    #print('len_global',len(s._global_bestX),len(s._global_bestY))

    #updating individual best:
    # Minimization convention: smaller objective values dominate.
    for i in range(s.population_size):
      if (s._individual_bestY[i]>=s._Y[i]).all() and (s._individual_bestY[i]>s._Y[i]).any():
        #new solution dominates old one
        s._individual_bestY[i]=s._Y[i]
        s._individual_bestX[i]=s._X[i]
      elif (s._individual_bestY[i]<s._Y[i]).any():
        #new solution not dominated by the old one. See if it approaches the pareto front best
        #'''
        # Interpolate the nearest point on the archive front: take the
        # n_objectives closest archive points and average them weighted by
        # inverse distance (or the point itself if distance is exactly 0).
        aux=(((s._global_bestY-s._Y[i])**2).sum(axis=1))**0.5
        aux2=np.argsort(aux)[:s.n_objectives] #get as many points as necessary to interpolate the pareto front nearest to the point
        if(aux[aux2[0]]==0):
          aux3=s._global_bestY[aux2[0]]
        else:
          #average the nearby points weighted by the inverse distance:
          aux3=(s._global_bestY[aux2]*((aux[aux2]**-1).reshape(-1,1))/np.sum(aux[aux2]**-1)).sum(axis=0)
        new_min=((aux3-s._Y[i])**2).sum()**0.5

        aux=(((s._global_bestY-s._individual_bestY[i])**2).sum(axis=1))**0.5
        aux2=np.argsort(aux)[:s.n_objectives] #get as many points as necessary to interpolate the pareto front nearest to the point
        if(aux[aux2[0]]==0):
          aux3=s._global_bestY[aux2[0]]
        else:
          #average the nearby points weighted by the inverse distance:
          aux3=(s._global_bestY[aux2]*((aux[aux2]**-1).reshape(-1,1))/np.sum(aux[aux2]**-1)).sum(axis=0)
        old_min=((aux3-s._individual_bestY[i])**2).sum()**0.5
        #'''
        '''
        aux=(((s._global_bestY-s._Y[i])**2).sum(axis=1))**0.5
        new_min=np.min(aux)
        aux=(((s._global_bestY-s._individual_bestY[i])**2).sum(axis=1))**0.5
        old_min=np.min(aux)
        '''
        # Keep whichever of (new point, old personal best) sits closer to
        # the interpolated archive front.
        if(old_min>new_min):
          s._individual_bestY[i]=s._Y[i]
          s._individual_bestX[i]=s._X[i]

    return