Example No. 1
def main(argv):
    eta_file = ''
    gamma_file = ''
    output_file = ''
    try:
        opts, args = getopt.getopt(argv,"he:g:o:",["eta_file=","gamma_file=","output_file="])
    except getopt.GetoptError:
        print 'GenModel.py -e <eta_file> -g <gamma_file> -o <output_file>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'GenModel.py -e <eta_file> -g <gamma_file> -o <output_file>'
            sys.exit()
        elif opt in ("-e", "--eta_file"):
            eta_file = arg
        elif opt in ("-g", "--gamma_file"):
            gamma_file = arg
        elif opt in ("-o", "--output_file"):
            output_file = arg

    eta = p.read_csv(eta_file,index_col=0) 
    pi = p.read_csv(gamma_file,index_col=0)
    import ipdb; ipdb.set_trace()
    sample = CSample(pi, eta)
    sample.sampleReads()
    rownames = eta.columns.values.tolist()
    ca = np.array(sample.con_counts)
    counts_df = p.DataFrame(data=ca,index=rownames,columns=pi.index.tolist()) 
    counts_df.to_csv(output_file) 
    import ipdb; ipdb.set_trace() 
Example No. 2
def scrape_json(url):

    info = json.loads(request.urlopen(url).read().decode())
    for match in info['el']:

        m = re.match(time_regex, match['ml'][0]['dd'])

        if m is None:
            print("Regex failed: %s" % match['ml'][0]['dd'])
            continue

        sql_date    = m.group(1)
        clock_time  = m.group(2)[0:5]

        home_team   = match['epl'][0]['pn']
        away_team   = match['epl'][1]['pn']

        if not home_team or not away_team:

            teams = match['en'].split(" - ")
            if len(teams) != 2:
                print("Team names failed")
                import ipdb; ipdb.set_trace()
                continue

            home_team = teams[0]
            away_team = teams[1]

        comp = match['scn']
        odds = {}

        for msl in match['ml'][0]['msl']:
            odds[msl['mst']] = msl['msp']
        
        db.process_match(comp, home_team, away_team, sql_date, clock_time, site, odds)
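For offline testing of scrape_json-style parsing, it helps to have a minimal payload satisfying the key accesses above. The keys ('el', 'ml', 'dd', 'epl', 'pn', 'en', 'scn', 'msl', 'mst', 'msp') are taken from the code; the concrete values below are assumptions, and the 'dd' string must match the module's time_regex, which is not shown:

# Hypothetical minimal payload; real responses may carry more fields.
sample_info = {
    'el': [{
        'ml': [{'dd': '2016-05-07 15:00',
                'msl': [{'mst': '1', 'msp': 2.10},
                        {'mst': 'X', 'msp': 3.40},
                        {'mst': '2', 'msp': 3.75}]}],
        'epl': [{'pn': 'Home FC'}, {'pn': 'Away FC'}],
        'en': 'Home FC - Away FC',
        'scn': 'Premier League',
    }]
}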
Example No. 3
   def pycode(self):
      global _classes_used
      _classes_used = {}

      import ipdb; ipdb.set_trace()


      parts = []
      if self.statement_list:
         parts.append(pycode(self.statement_list))
      if self.program:
         parts.append(pycode(self.program))

      ec = i2py_map.get_extra_code()
      if ec:
         parts.append(ec)

      for cname in _classes_used:
         c = _classes_used[cname]
         if not hasattr(c, 'init_def'):    # bare methods, no class definition or initializer method
            parts.append(('class %s(%s):\n'% (pycode(cname), config.baseclassname) \
                  + "\n".join([ pyindent(m) for m in c.methods])))
            continue
         parts.append(('class %s(%s):\n'% (pycode(cname), c.base_classes) \
               + "\n".join([ pyindent(m)
                     for m in ['__i2py_tagnames__ = %s\n' % (c.tag_names), c.init_def ] + c.methods])))

      if _classes_used:
         # IDL structs become objects of type I2PY_Struct, or subclasses thereof
         init_def = 'def __init__(self, *args, **kws):\n' \
               + pyindent('self.__dict__.update(zip(self.__i2py_tagnames__, args))\n') \
               + pyindent('self.__dict__.update(kws)')
         get_def = 'def __getitem__(self, key):\n' \
               + pyindent('return self.__dict__[self.__i2py_tagnames__[key]]')
         repr_def = 'def __repr__(self):\n'  \
               + pyindent('return "%s(%s)" % (self.__class__.__name__,\n') \
               + pyindent(pyindent('", ".join("%s=%s" % (k, v) for k, v in self.__dict__.iteritems()))'))
         parts.append(('class %s(object):\n'% config.baseclassname) \
               + pyindent('__i2py_tagnames__ = []') + '\n' \
               + pyindent(init_def) + '\n' \
               + pyindent(get_def) + '\n' \
               + pyindent(repr_def))


      parts.append('from %s import *' % config.arraymodule)
      # import ipdb; ipdb.set_trace()

      try:
         nl = self.NEWLINE[0]
      except TypeError:
         nl = self.NEWLINE
      if nl:
         doc = nl.asdocstring()
         if doc:
            parts.append(doc)
         else:
            parts[0] = pycode(nl) + parts[0]

      parts.reverse()
      return '\n\n'.join(parts)
Example No. 4
    def readLP(self, line):
        objects = line.split()
        operators = [ OPERATORS[var] for var in objects if var in OPERATORS ]

        symbol_in_func = []
        fval = 0
        for word in objects:
            if word in OPERATORS:
                # operators were already collected by the comprehension above
                continue
            for letter in word:
                # if the letter is a digit, use it as the coefficient
                constant = 1
                if letter.isdigit():
                    constant = int(letter)
                if letter in self.sym_dict:
                    symbol_in_func.append(constant * self.sym_dict[letter])
                    continue
                fval = constant


        set_trace()

        # lambdafunc, symvars, variables and D are defined elsewhere in the
        # original class (not shown in this excerpt)
        c = Constraint(lambdafunc,
                       [ (symv, int(v)) for (symv, v) in zip(symvars, variables)],
                       D, self)
Example No. 5
def pageWalker():
    global prevTrail, driver
    CSS=driver.find_elements_by_css_selector
    try:
        startIdx = getStartIdx()
        startPage = startIdx+1
        curPage = 1
        idx = curPage-1
        while idx != startPage-1:
            ulog('idx=%d,page=%d'%(idx, (idx+1)))
            pages = getElems('.x-page-com a')
            def pageNum(p):
                try:
                    return int(p.text.strip())
                except ValueError:
                    pass
                href = p.get_attribute('href')
                if not href:
                    return sys.maxsize
                try:
                    return int(re.search(r'void\((.+)\)', href).group(1))
                except Exception as ex:
                    ipdb.set_trace()
                    traceback.print_exc()
                    return sys.maxsize  # keep min() below well-defined
            tarPage = min(pages, key=lambda p: abs(startPage - pageNum(p)))
            ulog('tarPage=%d'%pageNum(tarPage))
            tarPage.click()
            ulog('tarPage.click()')
            time.sleep(0.5)
            retryUntilTrue(lambda:len(CSS('.x-waite'))==1, 16, 0.4 )
            uprint('waitCursor shows')
            retryUntilTrue(lambda:len(CSS('.x-waite'))==0 or 
                    CSS('.x-waite')[0].is_displayed()==False, 60, 1 )
            uprint('waitCursor disappears')
            curPage = int(waitText('a.cur'))
            ulog('curPage=%d'%curPage)
            idx = curPage-1

        for idx in itertools.count(startIdx):
            ulog('idx=%d,page=%d'%(idx, (idx+1)))
            prevTrail+=[idx]
            rowWalker()
            prevTrail.pop()
            try:
                nextPage = waitClickable('.x-next-on')
            except (NoSuchElementException, TimeoutException):
                ulog('last page')
                break
            nextPage.click()
            ulog('nextPage.click()')
            time.sleep(0.5)
            retryUntilTrue(lambda:len(CSS('.x-waite'))==1, 16, 0.4 )
            uprint('waitCursor shows')
            retryUntilTrue(lambda:len(CSS('.x-waite'))==0 or 
                    CSS('.x-waite')[0].is_displayed()==False, 60, 1 )
            uprint('waitCursor disappears')

    except Exception as ex:
        ipdb.set_trace()
        traceback.print_exc()
Example No. 6
    def _on_page(self, page):
        if not page:
            import ipdb
            ipdb.set_trace()

        soup = BeautifulSoup(page)
        if not soup.find('a', text='Log in'):
            event = soup.find('b', text='Something has happened!')
            if event:
                cell = event.findParent('table').findAll('td')[2]
                text = ''.join([x.text if hasattr(x, 'text') else x
                        for x in cell.childGenerator()])
                self._logger.info("Something has happned: %s", text)

            try:
                self._neopoints = get_np(soup)
            except NoNpInPage:
                pass

            return soup

        self._logger.info('Need to login. Using account %s', self._username)
        data = dict(username=self._username, password=self._password,
                    destination=soup.find(
                        'input', attrs=dict(name='destination'))['value'])
        d = self._browser.post('http://www.neopets.com/login.phtml', data)
        d.addCallback(self._on_login)
        return d
Example No. 7
 def _train(self):
     """Fit the classifier with the training set plus the new vectors and
     features. Then performs a step of EM.
     """
     try:
         if len(self.user_corpus):
             self.classifier.fit(
                 vstack((self.training_corpus.instances,
                         self.user_corpus.instances), format='csr'),
                 (self.training_corpus.primary_targets +
                  self.user_corpus.primary_targets),
                 features=self.user_features
             )
         else:
             self.classifier.fit(self.training_corpus.instances,
                                 self.training_corpus.primary_targets,
                                 features=self.user_features)
     except ValueError:
         import ipdb; ipdb.set_trace()
     self.recorded_precision.append({
         'testing_precision' : self.evaluate_test(),
         'training_precision' : self.evaluate_training(),
         'new_instances' : self.new_instances,
         'new_features' : self.new_features,
         'confusion_matrix': confusion_matrix(
             self.test_corpus.primary_targets,
             self.predict(self.test_corpus.instances)
         ),
         'feature_boost': self.feature_boost
     })
     self.new_instances = 0
     self.new_features = 0
     self.classes = self.classifier.classes_.tolist()
     self._retrained = True
Example No. 8
def solve(a, b):
    if b.ndim == 1:
        return tf.reshape(tf.matmul(tf.matrix_inverse(a), tf.expand_dims(b, -1)), [-1])
    elif b.ndim == 2:
        return tf.matmul(tf.matrix_inverse(a), b)
    else:
        import ipdb; ipdb.set_trace()
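The branch on b.ndim mirrors NumPy's solve semantics (tf.matrix_inverse is the TF1-era name). A minimal NumPy sketch of the same dispatch, assuming ordinary arrays; np.linalg.solve avoids forming the explicit inverse, which is generally better conditioned:

import numpy as np

def solve_np(a, b):
    # np.linalg.solve accepts both vector (ndim == 1) and matrix (ndim == 2)
    # right-hand sides, so no reshape dance is needed.
    if b.ndim in (1, 2):
        return np.linalg.solve(a, b)
    raise ValueError('expected b.ndim in (1, 2), got %d' % b.ndim)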
Example No. 9
def ast_stripping_op(f, *args, **kwargs):
    the_solver = kwargs.pop('the_solver', None)
    if _timing_enabled:
        the_solver = args[0] if the_solver is None else the_solver
        s = the_solver.state

        start = time.time()
        r = _actual_ast_stripping_op(f, *args, **kwargs)
        end = time.time()
        duration = end-start

        if s.scratch.sim_procedure is None and s.scratch.bbl_addr is not None:
            location = "bbl 0x%x, stmt %d (inst 0x%x)" % (s.scratch.bbl_addr, s.scratch.stmt_idx, s.scratch.ins_addr)
        elif s.scratch.sim_procedure is not None:
            location = "sim_procedure %s" % s.scratch.sim_procedure
        else:
            location = "unknown"
        lt.log(int((end-start)*10), '%s took %s seconds at %s', f.__name__, round(duration, 2), location)

        if break_time >= 0 and duration > break_time:
            import ipdb; ipdb.set_trace()
    else:
        r = _actual_ast_stripping_op(f, *args, **kwargs)

    return r
Example No. 10
def study_sdss_density(hemi='south'):
    grid = grid3d(hemi=hemi)
    n_data = num_sdss_data_both_catalogs(hemi, grid)
    n_rand, weight = num_sdss_rand_both_catalogs(hemi, grid)
    n_rand *= ((n_data*weight).sum() / (n_rand*weight).sum())
    delta = (n_data - n_rand) / n_rand
    delta[weight==0]=0.
    fdelta = np.fft.fftn(delta*weight)
    power = np.abs(fdelta)**2.
    ks = get_wavenumbers(delta.shape, grid.reso_mpc)
    kmag = ks[3]
    kbin = np.arange(0,0.06,0.002)
    ind = np.digitize(kmag.ravel(), kbin)
    power_ravel = power.ravel()
    power_bin = np.zeros_like(kbin)
    for i in range(len(kbin)):
        print i
        wh = np.where(ind==i)[0]
        power_bin[i] = power_ravel[wh].mean()
    #pl.clf()
    #pl.plot(kbin, power_bin)
    from cosmolopy import perturbation
    pk = perturbation.power_spectrum(kbin, 0.4, **cosmo)
    pl.clf(); pl.plot(kbin, power_bin/pk, 'b')
    pl.plot(kbin, power_bin/pk, 'bo')    
    pl.xlabel('k (1/Mpc)',fontsize=16)
    pl.ylabel('P(k) ratio, DATA/THEORY [arb. norm.]',fontsize=16)
    ipdb.set_trace()
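The explicit loop over k-bins can be vectorized with np.bincount; a minimal sketch (the helper name is mine) that reproduces the per-bin means computed above:

import numpy as np

def binned_mean(values, bin_index, n_bins):
    # bin_index as returned by np.digitize; empty bins come out as NaN,
    # matching the mean of an empty selection in the loop version.
    counts = np.bincount(bin_index, minlength=n_bins)[:n_bins]
    sums = np.bincount(bin_index, weights=values, minlength=n_bins)[:n_bins]
    return sums / counts

# e.g. power_bin = binned_mean(power.ravel(), np.digitize(kmag.ravel(), kbin), len(kbin))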
Example No. 11
def make_corr1d_fig(dosave=False):
    corr = make_corr_both_hemi()
    lw=2; fs=16
    pl.figure(1)#, figsize=(8, 7))
    pl.clf()
    pl.xlim(4,300)
    pl.ylim(-400,+500)    
    lambda_titles = [r'$20 < \lambda < 30$',
                     r'$30 < \lambda < 40$',
                     r'$\lambda > 40$']
    colors = ['blue','green','red']
    for i in range(3):
        corr1d, rcen = corr_1d_from_2d(corr[i])
        ipdb.set_trace()
        pl.semilogx(rcen, corr1d*rcen**2, lw=lw, color=colors[i])
        #pl.semilogx(rcen, corr1d*rcen**2, 'o', lw=lw, color=colors[i])
    pl.xlabel(r'$s (Mpc)$',fontsize=fs)
    pl.ylabel(r'$s^2 \xi_0(s)$', fontsize=fs)    
    pl.legend(lambda_titles, 'lower left', fontsize=fs+3)
    pl.plot([.1,10000],[0,0],'k--')
    s_bao = 149.28
    pl.plot([s_bao, s_bao],[-9e9,+9e9],'k--')
    pl.text(s_bao*1.03, 420, 'BAO scale')
    pl.text(s_bao*1.03, 370, '%0.1f Mpc'%s_bao)
    if dosave: pl.savefig('xi1d_3bin.pdf')
Example No. 12
def study_redmapper_lrg_3d(hemi='north'):
    # create 3d grid object
    grid = grid3d(hemi=hemi)
    
    # load SDSS data
    sdss = load_sdss_data_both_catalogs(hemi)
    
    # load redmapper catalog
    rm = load_redmapper(hemi=hemi)
    
    # get XYZ positions (Mpc) of both datasets
    x_sdss, y_sdss, z_sdss = grid.xyz_from_radecz(sdss['ra'], sdss['dec'], sdss['z'], applyzcut=False)
    x_rm, y_rm, z_rm = grid.xyz_from_radecz(rm['ra'], rm['dec'], rm['z_spec'], applyzcut=False)
    pos_sdss = np.vstack([x_sdss, y_sdss, z_sdss]).T
    pos_rm = np.vstack([x_rm, y_rm, z_rm]).T

    # build a couple of KDTrees, one for SDSS, one for RM.
    from sklearn.neighbors import KDTree
    tree_sdss = KDTree(pos_sdss, leaf_size=30)
    tree_rm = KDTree(pos_rm, leaf_size=30)

    lrg_counts = tree_sdss.query_radius(pos_rm, 100., count_only=True)
    pl.clf()
    pl.hist(lrg_counts, bins=50)
    
    
    ipdb.set_trace()
Example No. 13
def study_redmapper_2d():
    # I just want to know the typical angular separation for RM clusters.
    # I'm going to do this in a lazy way.
    hemi = 'north'
    rm = load_redmapper(hemi=hemi)
    ra = rm['ra']
    dec = rm['dec']
    ncl = len(ra)
    dist = np.zeros((ncl, ncl))
    for i in range(ncl):
        this_ra = ra[i]
        this_dec = dec[i]
        dra = this_ra-ra
        ddec = this_dec-dec
        dxdec = dra*np.cos(this_dec*np.pi/180.)
        dd = np.sqrt(dxdec**2. + ddec**2.)
        dist[i,:] = dd
        dist[i,i] = 99999999.
    d_near_arcmin = dist.min(0)*60.
    pl.clf(); pl.hist(d_near_arcmin, bins=100)
    pl.title('Distance to Nearest Neighbor for RM clusters')
    pl.xlabel('Distance (arcmin)')
    pl.ylabel('N')
    fwhm_planck_217 = 5.5 # arcmin
    sigma = fwhm_planck_217/2.355
    frac_2sigma = 1.*len(np.where(d_near_arcmin>2.*sigma)[0])/len(d_near_arcmin)
    frac_3sigma = 1.*len(np.where(d_near_arcmin>3.*sigma)[0])/len(d_near_arcmin)
    print '%0.3f percent of RM clusters are separated by 2-sigma_planck_beam'%(100.*frac_2sigma)
    print '%0.3f percent of RM clusters are separated by 3-sigma_planck_beam'%(100.*frac_3sigma)    
    ipdb.set_trace()
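The O(ncl^2) pairwise loop can also be done with a KDTree query (the same sklearn KDTree used in Example No. 12); a hedged sketch under the same flat-sky approximation, except that each point's RA offset is scaled by its own cos(dec):

import numpy as np
from sklearn.neighbors import KDTree

def nearest_neighbor_arcmin(ra, dec):
    # Project RA by cos(dec), then ask for each point's two nearest
    # neighbors: the first hit is always the point itself at distance zero.
    xy = np.column_stack([ra * np.cos(dec * np.pi / 180.), dec])
    dist, _ = KDTree(xy).query(xy, k=2)
    return dist[:, 1] * 60.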
Example No. 14
def parse_page(html_source):
	eventlist_info = {}
	eventlist_info['events'] = {}
	eventlist_info['years'] = []
	html_source = utils.fix_invalid_table(html_source)
	soup = bs4.BeautifulSoup(html_source)
	soup_pref = soup.find(id='pref')
	soup_innermain = soup_pref.parent
	if soup_innermain is None:
		return None	# info not found

	# parse event list
	soup_table = soup_innermain.find('table')
	for soup_cell in soup_table.find_all('td'):
		for soup_year in soup_cell.find_all('h3', recursive=False):
			year = unicode(soup_year.string)
			if year not in eventlist_info['years']:
				eventlist_info['years'].append(year)
			if year not in eventlist_info['events']:
				eventlist_info['events'][year] = []
			if not soup_year.find_next('ul'):
				import ipdb; ipdb.set_trace()
			year_eventlist = _parse_eventlist(soup_year.find_next('ul'))
			eventlist_info['events'][year].extend(year_eventlist)
			eventlist_info['events'][year].sort(key=itemgetter('startdate'))
	eventlist_info['years'].sort()
	
	# parse page meta
	eventlist_info['meta'] = {}
	
	return eventlist_info
Example No. 15
    def run(self, n=None):
        """
        Runs the analysis through completion (until done() returns True) or,
        if n is provided, n times.

            @param n: the maximum number of ticks
            @returns itself for chaining
        """
        global STOP_RUNS, PAUSE_RUNS  # pylint: disable=W0602,

        # We do a round of filtering first
        self.active = self.filter_paths(self.active)

        while not self.done and (n is None or n > 0):
            self.step()

            if STOP_RUNS:
                l.warning("%s stopping due to STOP_RUNS being set.", self)
                l.warning("... please call resume_analyses() and then this.run() if you want to resume execution.")
                break

            if PAUSE_RUNS:
                l.warning("%s pausing due to PAUSE_RUNS being set.", self)
                l.warning("... please call disable_singlestep() before continuing if you don't want to single-step.")

                try:
                    import ipdb as pdb  # pylint: disable=F0401,
                except ImportError:
                    import pdb
                pdb.set_trace()

            if n is not None:
                n -= 1
        return self
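A hypothetical usage of the chaining noted in the docstring (`analysis` stands in for any instance of this class):

analysis.run(n=100)   # tick at most 100 times
analysis.run()        # run until done() returns True
analysis.run(n=100).run()   # equivalent, since run() returns self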
Example No. 16
 def si_basic(self):
     #        ref = self.ret[1]['coll']
     #        it = RawIterator(ref)
     #        for row in it:
     #            print row
     import ipdb;
     ipdb.set_trace()
Example No. 17
def graspit_grasp_pose_to_moveit_grasp_pose(move_group_commander, listener, graspit_grasp_msg,
                                            grasp_frame='/approach_tran'):
    """
    :param move_group_commander: A move_group command from which to get the end effector link.
    :type move_group_commander: moveit_commander.MoveGroupCommander
    :param listener: A transformer for looking up the transformation
    :type listener: tf.TransformListener
    :param graspit_grasp_msg: A graspit grasp message
    :type graspit_grasp_msg: graspit_msgs.msg.Grasp

    """

    try:
        listener.waitForTransform(grasp_frame, move_group_commander.get_end_effector_link(),
                                     rospy.Time(0), timeout=rospy.Duration(1))
        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(grasp_frame, move_group_commander.get_end_effector_link(),rospy.Time())
    except Exception:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n " +
                    "Failed to find transform from %s to %s"%(grasp_frame, move_group_commander.get_end_effector_link()))
        ipdb.set_trace()



    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    if move_group_commander.get_end_effector_link() == 'l_wrist_roll_link':
        rospy.logerr('This is a PR2\'s left arm so we have to rotate things.')
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation([0,0,0], tf.transformations.quaternion_from_euler(0,math.pi/2,0))
    else:
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation([0,0,0], [0,0,0])
    actual_ee_pose_matrix = np.dot( graspit_grasp_msg_final_grasp_tran_matrix, approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose_matrix = np.dot(actual_ee_pose_matrix, pr2_is_weird_mat)
    actual_ee_pose = tf_conversions.toMsg(tf_conversions.fromMatrix(actual_ee_pose_matrix))
    rospy.loginfo("actual_ee_pose: " + str(actual_ee_pose))
    return actual_ee_pose
Example No. 18
def udf(func):
    llvm_module = first(func._compileinfos.values()).library._final_module
    engine = ee.EngineBuilder.new(llvm_module).create()
    functions = [
        func for func in llvm_module.functions
        if not func.name.startswith('_') and not func.is_declaration
    ]
    addr = engine.get_function_address(functions[1].name)
    assert addr > 0, 'addr == %d' % addr

    # Declare the ctypes function prototype
    # functype = cfunctype(c_double, c_double)

    path = os.path.expanduser(
        os.path.join('~', 'ibis-data', 'ibis-testing-data', 'ibis-testing.db')
    )
    con = sqlite3_connection(path.encode('utf8'))
    result = register(
        con,
        addr,
        func.__name__.encode('utf8'),
        len(func.nopython_signatures[0].args)
    )
    import ipdb; ipdb.set_trace()
    con.execute("select mysin(1.0230923)".encode('utf8'))
Example No. 19
def psf_airy_disk_kernel(wavelength_m, 
	l_px_m=None, 
	f_ratio=None,
	N_OS=None, 
	T_OS=8,
	detector_size_px=None,
	trunc_sigma=10.25,	# 10.25 corresponds to the 10th Airy ring		
	plotit=False):
	"""
		Returns an Airy disc PSF corresponding to an optical system with a given f ratio, pixel size and detector size at a specified wavelength_m.

		If the detector size is not specified, then the PSF is truncated at a radius of 8 * sigma, where sigma corresponds to the HWHM (to speed up convolutions made using this kernel).

		There are 3 ways to constrain the plate scale of the output PSF. One of either the f ratio, the pixel width or the Nyquist sampling factor (where a larger number ==> finer sampling) must be left unspecified, and will be constrained by the other two parameters.
	"""	

	# Now, we have to calculate what the EFFECTIVE f ratio needs to be to achieve the desired Nyquist oversampling in the returned PSF.
	if not f_ratio:
		f_ratio = 2 * N_OS / wavelength_m * np.deg2rad(206265 / 3600) * l_px_m
	elif not N_OS:
		N_OS = wavelength_m * f_ratio / 2 / np.deg2rad(206265 / 3600) / l_px_m
		ipdb.set_trace()	
	elif not l_px_m:
		l_px_m = wavelength_m * f_ratio / 2 / np.deg2rad(206265 / 3600) / N_OS	

	if not detector_size_px:
		psf_size = int(np.round(trunc_sigma * N_OS * 4))
		detector_size_px = (psf_size,psf_size)	

	# In the inputs to this function, do we need to specify the oversampling factor AND the f ratio and/or pixel widths?
	kernel = airy_disc(wavelength_m=wavelength_m, f_ratio=f_ratio, l_px_m=l_px_m, detector_size_px=detector_size_px, trapz_oversampling=T_OS, plotit=plotit)[0]	

	return kernel
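Since np.deg2rad(206265 / 3600) is 1 radian, the first branch reduces to f_ratio = 2 * N_OS * l_px_m / wavelength_m, so fixing any two of the three quantities determines the third. A worked check with assumed values:

import numpy as np

wavelength_m = 800e-9   # assumed: 800 nm
l_px_m = 6.5e-6         # assumed: 6.5 um pixels
N_OS = 1.0              # exact Nyquist sampling
f_ratio = 2 * N_OS / wavelength_m * np.deg2rad(206265 / 3600) * l_px_m
print(f_ratio)          # ~16.25: an f/16.25 beam Nyquist-samples these pixels at 800 nm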
Example No. 20
 def _wrapper(self, crowd_response):
     import ipdb; ipdb.set_trace()
     task = choice_func(crowd_response)
     if task in [self.t1, self.t2]:
         return task
     else:
         raise CrowdChoiceError(value="Invalid return type for %s.choice(). Please use either %s or %s." % (self, self.t1, self.t2))
Example No. 21
def test_uninitialized_reads(arch, starts):
    uninitialized_reads = projects['uninitialized_reads']
    cfg = uninitialized_reads[arch].analyses.CFG()
    for start in starts:
        uninitialized_reads[arch].analyses.VFG(start=start)
    vfg = uninitialized_reads[arch].vfg
    variable_seekr = angr.VariableSeekr(uninitialized_reads[arch], cfg, vfg)

    for start in starts:
        try:
            variable_seekr.construct(func_start=start)
        except AngrError:
            l.info('AngrError...')
            continue
        function_manager = cfg.function_manager
        for func_addr, _ in function_manager.functions.items():
            l.info("Function %xh", func_addr)
            variable_manager = variable_seekr.get_variable_manager(func_addr)
            if variable_manager is None:
                continue
            # TODO: Check the result returned
            l.info("Variables: ")
            for var in variable_manager.variables:
                if isinstance(var, angr.StackVariable):
                    l.info(var.detail_str())
                else:
                    l.info("%s(%d),  referenced at %08x", var, var._size, var._inst_addr)

    import ipdb; ipdb.set_trace()
Example No. 22
    def draw_mask(self, img_h, img_w, pred_poly, pred_mask):
        batch_size = pred_poly.shape[0]

        pred_poly_lens = np.sum(pred_mask, axis=1)

        assert pred_poly_lens.shape[0] == batch_size == self.batch_size, '%s,%s,%s' % (
            str(pred_poly_lens.shape[0]), str(batch_size), str(self.batch_size))

        masks_imgs = []
        for i in range(batch_size):
            # Cleaning the polys
            p_poly = pred_poly[i][:pred_poly_lens[i], :]

            # Printing the mask
            # if self.draw_perimeter is False:
            try:
                mask1 = np.zeros((img_h, img_w))
                mask1 = polyutils.draw_poly(mask1, p_poly.astype(np.int))
                mask1 = np.reshape(mask1, [img_h, img_w, 1])
                # else:
                mask = polyutils.polygon_perimeter(p_poly.astype(np.int), img_side=28)
                mask = np.reshape(mask, [img_h, img_w, 1])
            except:
                import ipdb;
                ipdb.set_trace()

            mask = np.concatenate((mask, mask1), axis=2)

            masks_imgs.append(mask)
        masks_imgs = np.array(masks_imgs, dtype=np.float32)
        return np.reshape(masks_imgs, [self.batch_size, img_h, img_w, 2])
Example No. 23
    def next(self):
        #if self.nsteps == self.length: raise StopIteration()

        nexts= {}
        intersections= {}
        nexts_distr= self._get_state_distr(self.actual_state)
        for state, prob in nexts_distr.iteritems():
            # if the intersection of the notes I'm currently playing with the ones I must play is non-empty
            intersections[state]= self.now_pitches.intersection(self.must_dict[self.length-self.nsteps][state])
            if len(intersections[state]) > 0:
                nexts[state]= prob

        if len(nexts) == 0:
            import ipdb;ipdb.set_trace()
            raise Exception('Impossible phrase: actual_state= %(actual_state)s, start_pitch: %(n0)s, %(n1)s end_pitch: %(nfs)s, length:%(length)s' % self.__dict__)

        s= sum(nexts.itervalues())
        for state, prob in nexts.iteritems():
            nexts[state]= prob/s
        
        next= RandomPicker(values=nexts).get_value()
        self.actual_state= next

        now_pitches= set()
        for p in intersections[next]:
            now_pitches.update(next.related_notes(p[0], p[1], self.available_notes, reverse=False))
        self.now_pitches= now_pitches

        self.history.append((self.now_pitches, self.actual_state))
        self.nsteps+=1
        return next
Example No. 24
def main():
    global startTrail,prevTrail,driver,conn
    try:
        startTrail = [int(re.search(r'\d+', _).group(0)) for _ in sys.argv[1:]]
        uprint('startTrail=%s'%startTrail)
        conn = sqlite3.connect('netgear.sqlite3')
        sql("CREATE TABLE IF NOT EXISTS TFiles("
                "id INTEGER NOT NULL,"
                "vendor TEXT,"
                "model TEXT,"
                "revision TEXT,"
                "fw_date TEXT,"
                "fw_ver TEXT,"
                "file_name TEXT,"
                "file_size TEXT,"
                "page_url TEXT,"
                "file_url TEXT,"
                "tree_trail TEXT,"
                "file_sha1 TEXT,"
                "PRIMARY KEY (id),"
                "UNIQUE(vendor,model,revision,file_name)"
                ");")
        driver = harvest_utils.getFirefox()
        harvest_utils.driver= driver
        driver.get("http://downloadcenter.netgear.com/")
        prevTrail=[]
        # tmr = ClickOutOverlayTimer()
        # tmr.start()
        walkProdCat()
    except Exception as ex:
        traceback.print_exc(); ipdb.set_trace()
        driver.save_screenshot('netgear_exc.png')
    finally:
        driver.quit()
        conn.close()
Example No. 25
def storeFile(modelName, fileItem):
    global driver, prevTrail
    try:
        try:
            fileUrl = fileItem.get_attribute('data-durl')
            if fileUrl is None:
                fileUrl = fileItem.get_attribute('href')
        except Exception as ex:
            fileUrl = fileItem.get_attribute('href')
        _, ext = path.splitext(fileUrl)
        if ext in ['.html', '.htm']:
            return
        fileName = fileItem.text.strip()
        vendor='Netgear'
        try:
            fwVer=re.search(r'(?<=Version\ )\d+(\.\d+)+',fileName, flags=re.I)\
                .group(0)
        except Exception as ex:
            fwVer=None
        pageUrl = driver.current_url
        rev=""
        trailStr=str(prevTrail)
        sql("INSERT OR REPLACE INTO TFiles (vendor, model,revision,"
            "fw_ver, file_name, "
            "page_url, file_url, tree_trail) VALUES"
            "(:vendor, :modelName, :rev,"
            ":fwVer,:fileName,"
            ":pageUrl,:fileUrl,:trailStr)",locals())
        ulog('UPSERT "%(modelName)s", '
            ' "%(fileName)s", %(fileUrl)s'%locals())
    except Exception as ex:
        ipdb.set_trace(); traceback.print_exc()
        driver.save_screenshot('netgear_exc.png')
Example No. 26
def walkProd():
    global driver, prevTrail
    try:
        # click overlay advertisement popup left button "No Thanks"
        try:
            driver.find_element_by_css_selector("a.btn.close.fl-left").\
                    click()
        except (NoSuchElementException):
            pass

        zpath = ('#ctl00_ctl00_ctl00_mainContent_localizedContent_bodyCenter'+
                 '_adsPanel_lbProduct')
        waitTextChanged(zpath)
        curSel = Select(css(zpath))
        numProds = len(curSel.options)
        ulog("numProds=%d"%numProds)

        startIdx = getStartIdx()
        for idx in range(startIdx, numProds):
            curSel = Select(css(zpath))
            ulog("idx=%s"%idx)
            ulog('select "%s"'%curSel.options[idx].text)
            curSel.select_by_index(idx)
            prevTrail+=[idx]
            while True:
                ret = walkFile()
                if ret != TRY_AGAIN:
                    break
            if ret == PROC_GIVE_UP:
                ulog('"%s" is GIVE UP'% curSel.options[idx].text)
            prevTrail.pop()
        return PROC_OK
    except Exception as ex:
        traceback.print_exc(); ipdb.set_trace()
        driver.save_screenshot('netgear_exc.png')
Example No. 27
def walkProdCat():
    global driver, prevTrail
    try:
        # click "Drilldown"
        waitClickable('#ctl00_ctl00_ctl00_mainContent_localizedContent_bodyCenter_BasicSearchPanel_btnAdvancedSearch')\
            .click()

        zpath = ('#ctl00_ctl00_ctl00_mainContent_localizedContent_bodyCenter_'+
                 'adsPanel_lbProductCategory')
        curSel = Select(css(zpath))
        numProdCats = len(curSel.options)
        ulog('numProdCats=%d'%numProdCats)

        startIdx = getStartIdx()
        for idx in range(startIdx, numProdCats):
            curSel = Select(css(zpath))
            ulog("idx=%s"%idx)
            ulog('select "%s"'%curSel.options[idx].text)
            curSel.select_by_index(idx)
            prevTrail+=[idx]
            walkProdFam()
            prevTrail.pop()
    except Exception as ex:
        traceback.print_exc(); ipdb.set_trace()
        driver.save_screenshot('netgear_exc.png')
Example No. 28
 def beforeTest(self, test):
     """Inserts a marker into the MM logs."""
     import ipdb;ipdb.set_trace()
     marker = self._get_markers_for_test(test.id())['start']
     midolman_hosts = service.get_all_containers('midolman')
     for midolman in midolman_hosts:
         midolman.set_log_marker(marker)
Example No. 29
    def makepost(self, url, **params):
        if params.get('out_order_id', None) is None:
            params.update(out_order_id=self.order)
            self.order += 1
        if params.get('nonce', None) is None:
            params.update(nonce=self.nonce)
            self.nonce += 1

        req = requests.Request('POST', url=self.base_api_url+url, data=params)
        prepared = req.prepare()
        prepared.headers['api-sign'] = hashlib.sha256(
            "%s%s" % (prepared.body, self.privkey)).hexdigest()
        prepared.headers['public-key'] = self.pubkey
        try:
            response = self.sendrequest(prepared)
            #import ipdb; ipdb.set_trace()
            print "url = '%s'\nPOST body='%s'\nHeaders = '%s'\nResponse: '%s'" % (
                prepared.url, prepared.body, prepared.headers, response.content)
            try:
                return json.loads(response.content)
            except Exception, e:
                print e
                return dict(status=False) #### WRONG WAY! !!!! TEMPORARY FIX!
        except Exception, e:
            print e
            #if response.status_code != 200:
            import ipdb; ipdb.set_trace()
            raise ConnectionError
Example No. 30
def docproblem(fmt_string, *args, **kwargs):
    """Create a document formatting error message for the user.

    Returns an integer indiciating the how manieth encountered problem this
    is.

    (2.7-style) `fmt_string` will have `args` interpolated in, truncated if
    necessary (if you want to construct an untruncated).

    """
    fmt = unicode(fmt_string)
    global ERROR_COUNT # pylint: disable=W0603
    level = kwargs.pop('level', 'error')
    ERROR_COUNT += 1
    # make it so an unescaped string alone is not misinterpreted
    # as a format_string; strictly speaking this is a usage error
    # but this is unambiguous and more convenient
    msg = (fmt if not args
           else fmt.format(*map(_trunc, args)).replace('\n', ' '))
    err = OrderedDict(
        [('type', 'body'),
         ('id', 'docproblem%d' % ERROR_COUNT),
         ('body', msg)])
    if level == 'error':
        exit_code.final_exit_code |= exit_code.BODY_ERROR_EXIT
        if ON_ERROR == 'raise':
            raise RuntimeError('%r' % err)
        elif ON_ERROR == 'debug':
            import ipdb
            ipdb.set_trace()
        else:
            assert ON_ERROR == 'log'
    _parseable_error(err, level=level, **kwargs)
    return ERROR_COUNT
Example No. 31
def augment_dataset(d, programs):
    programs = np.random.permutation(programs).tolist()
    for program_name, apt_name in tqdm(programs):

        augmented_progs_i = []
        augmented_progs_i_new_inst = []
        augmented_preconds_i = []
        state_list_i = []
        if program_name in d.keys():
            continue
        if multi_process:
            d[program_name] = str(current_process())
        if len(d.keys()) % 20 == 0 and verbose:
            print(len(d.keys()))

        state_file = program_name.replace('withoutconds', 'initstate').replace(
            '.txt', '.json')

        with open(program_name, 'r') as f:
            lines_program = f.readlines()
            program = lines_program[4:]

        with open(state_file, 'r') as fst:
            init_state = json.load(fst)

        # for every object, we list the objects that are inside, on etc.
        # they will need to be replaced by containers having the same
        # objects inside and on
        relations_per_object = {}
        for cstate in init_state:
            precond = [k for k in cstate.keys()][0]
            if precond in precondtorelation.keys():
                relation = precondtorelation[precond]
                object1 = cstate[precond][0][0]
                container = tuple(cstate[precond][1])
                if container not in relations_per_object.keys():
                    relations_per_object[container] = []
                relations_per_object[container] += [(object1, relation)]

        # Given all the containers, check which objects can go there
        object_replace_map = {}
        for container in relations_per_object.keys():
            replace_candidates = []
            for object_and_relation in relations_per_object[container]:
                if object_and_relation in merge_dict.keys():
                    replace_candidates.append(merge_dict[object_and_relation])

            # do an intersection of all the replace candidates
            intersection = []
            object_replace_map[container] = []
            # if there are objects we can replace
            if len(replace_candidates) > 0 and len(
                [l for l in replace_candidates if len(l) == 0]) == 0:
                intersection = list(
                    set.intersection(*[set(l) for l in replace_candidates]))
                candidates = [x for x in intersection if x != container[0]]
                if len(candidates) > 0:
                    # How many containers to replace
                    cont = random.randint(1, min(len(candidates), 5))
                    # sample candidates
                    if cont > 1:
                        object_replace = random.sample(candidates, cont - 1)
                        object_replace_map[container] += object_replace

        objects_prog = object_replace_map.keys()
        npgs = 0
        # Cont has, for each unique object, the number of objects we will replace it with
        cont = []
        for obj_and_id in objects_prog:
            cont.append(len(object_replace_map[obj_and_id]))

        # We obtain all the permutations given cont
        ori_precond = init_state
        recursive_selection = augmentation_utils.recursiveSelection(
            cont, 0, [])

        # For every permutation, we compute the new program
        for rec_id in recursive_selection:
            # change program
            new_lines = program
            precond_modif = copy.deepcopy(ori_precond)
            precond_modif = str(precond_modif).replace('\'', '\"')

            for iti, obj_and_id in enumerate(objects_prog):
                orign_object, idi = obj_and_id
                object_new = object_replace_map[obj_and_id][rec_id[iti]]
                new_lines = [
                    x.replace(
                        '<{}> ({})'.format(orign_object, idi),
                        '<{}> ({})'.format(
                            object_new.lower().replace(' ', '_'), idi))
                    for x in new_lines
                ]
                precond_modif = precond_modif.replace(
                    '[\"{}\", \"{}\"]'.format(orign_object, idi),
                    '[\"{}\", \"{}\"]'.format(
                        object_new.lower().replace(' ', '_'), idi))

            try:
                init_state = ast.literal_eval(precond_modif)
                (message, final_state, graph_state_list, input_graph,
                 id_mapping, info, graph_helper,
                 modified_script) = check_programs.check_script(
                     new_lines, init_state,
                     '../example_graphs/{}.json'.format(apt_name), None, False,
                     {}, {})
            except:
                ipdb.set_trace()

            # Convert the program
            lines_program_newinst = []
            for script_line in modified_script:
                script_line_str = '[{}]'.format(script_line.action.name)
                if script_line.object():
                    script_line_str += ' <{}> ({})'.format(
                        script_line.object().name,
                        script_line.object().instance)
                if script_line.subject():
                    script_line_str += ' <{}> ({})'.format(
                        script_line.subject().name,
                        script_line.subject().instance)

                for k, v in id_mapping.items():
                    obj_name, obj_number = k
                    id = v
                    script_line_str = script_line_str.replace(
                        '<{}> ({})'.format(obj_name, id),
                        '<{}> ({}.{})'.format(obj_name, obj_number, id))
                lines_program_newinst.append(script_line_str)

            augmented_progs_i_new_inst.append(lines_program_newinst)
            state_list_i.append(graph_state_list)
            augmented_progs_i.append(new_lines)
            augmented_preconds_i.append(init_state)
            npgs += 1
            if npgs > thres:
                break

        # The current program
        if write_augment_data:
            augmentation_utils.write_data(augmented_data_dir, program_name,
                                          augmented_progs_i)
            augmentation_utils.write_data(
                augmented_data_dir, program_name, augmented_progs_i_new_inst,
                'executable_programs/{}/'.format(apt_name))
            augmentation_utils.write_precond(augmented_data_dir, program_name,
                                             augmented_preconds_i)
            augmentation_utils.write_graph(augmented_data_dir, program_name,
                                           state_list_i, apt_name)
Example No. 32
def train(
        dim_word=100,  # word vector dimensionality
        dim=1000,  # the number of LSTM units
        encoder='gru',
        decoder='gru_cond',
        patience=10,  # early stopping patience
        max_epochs=5000,
        finish_after=10000000,  # finish after this many updates
        dispFreq=100,
        decay_c=0.,  # L2 regularization penalty
        alpha_c=0.,  # alignment regularization
        clip_c=-1.,  # gradient clipping threshold
        lrate=0.01,  # learning rate
        n_words_src=100000,  # source vocabulary size
        n_words=100000,  # target vocabulary size
        maxlen=100,  # maximum length of the description
        optimizer='rmsprop',
        batch_size=16,
        valid_batch_size=16,
        saveto='model.npz',
        validFreq=1000,
        saveFreq=1000,  # save the parameters after every saveFreq updates
        sampleFreq=100,  # generate some samples after every sampleFreq
        datasets=[
            '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
            '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'
        ],
        valid_datasets=[
            '../data/dev/newstest2011.en.tok',
            '../data/dev/newstest2011.fr.tok'
        ],
        dictionaries=[
            '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
            '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'
        ],
        use_dropout=False,
        reload_=False):

    # Model options
    model_options = locals().copy()

    # load dictionaries and invert them
    worddicts = [None] * len(dictionaries)
    worddicts_r = [None] * len(dictionaries)
    for ii, dd in enumerate(dictionaries):
        with open(dd, 'rb') as f:
            worddicts[ii] = pkl.load(f)
        worddicts_r[ii] = dict()
        for kk, vv in worddicts[ii].iteritems():
            worddicts_r[ii][vv] = kk

    # reload options
    if reload_ and os.path.exists(saveto):
        with open('%s.pkl' % saveto, 'rb') as f:
            model_options = pkl.load(f)

    print 'Loading data'
    train = TextIterator(datasets[0],
                         datasets[1],
                         dictionaries[0],
                         dictionaries[1],
                         n_words_source=n_words_src,
                         n_words_target=n_words,
                         batch_size=batch_size,
                         maxlen=maxlen)
    valid = TextIterator(valid_datasets[0],
                         valid_datasets[1],
                         dictionaries[0],
                         dictionaries[1],
                         n_words_source=n_words_src,
                         n_words_target=n_words,
                         batch_size=valid_batch_size,
                         maxlen=maxlen)

    print 'Building model'
    params = init_params(model_options)
    # reload parameters
    if reload_ and os.path.exists(saveto):
        params = load_params(saveto, params)

    tparams = init_tparams(params)

    trng, use_noise, \
        x, x_mask, y, y_mask, \
        opt_ret, \
        cost = \
        build_model(tparams, model_options)
    inps = [x, x_mask, y, y_mask]

    print 'Building sampler'
    f_init, f_next = build_sampler(tparams, model_options, trng)

    # before any regularizer
    print 'Building f_log_probs...',
    f_log_probs = theano.function(inps, cost, profile=profile)
    print 'Done'

    cost = cost.mean()

    # apply L2 regularization on weights
    if decay_c > 0.:
        decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
        weight_decay = 0.
        for kk, vv in tparams.iteritems():
            weight_decay += (vv**2).sum()
        weight_decay *= decay_c
        cost += weight_decay

    # regularize the alpha weights
    if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
        alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
        alpha_reg = alpha_c * (
            (tensor.cast(y_mask.sum(0) // x_mask.sum(0), 'float32')[:, None] -
             opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
        cost += alpha_reg

    # after all regularizers - compile the computational graph for cost
    print 'Building f_cost...',
    f_cost = theano.function(inps, cost, profile=profile)
    print 'Done'

    print 'Computing gradient...',
    grads = tensor.grad(cost, wrt=itemlist(tparams))
    print 'Done'

    # apply gradient clipping here
    if clip_c > 0.:
        g2 = 0.
        for g in grads:
            g2 += (g**2).sum()
        new_grads = []
        for g in grads:
            new_grads.append(
                tensor.switch(g2 > (clip_c**2), g / tensor.sqrt(g2) * clip_c,
                              g))
        grads = new_grads

    # compile the optimizer, the actual computational graph is compiled here
    lr = tensor.scalar(name='lr')
    print 'Building optimizers...',
    f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
    print 'Done'

    print 'Optimization'

    history_errs = []
    # reload history
    if reload_ and os.path.exists(saveto):
        history_errs = list(numpy.load(saveto)['history_errs'])
    best_p = None
    bad_counter = 0

    if validFreq == -1:
        validFreq = len(train[0]) / batch_size
    if saveFreq == -1:
        saveFreq = len(train[0]) / batch_size
    if sampleFreq == -1:
        sampleFreq = len(train[0]) / batch_size

    uidx = 0
    estop = False
    for eidx in xrange(max_epochs):
        n_samples = 0

        for x, y in train:
            n_samples += len(x)
            uidx += 1
            use_noise.set_value(1.)

            x, x_mask, y, y_mask = prepare_data(x,
                                                y,
                                                maxlen=maxlen,
                                                n_words_src=n_words_src,
                                                n_words=n_words)

            if x is None:
                print 'Minibatch with zero sample under length ', maxlen
                uidx -= 1
                continue

            ud_start = time.time()

            # compute cost, grads and copy grads to shared variables
            cost = f_grad_shared(x, x_mask, y, y_mask)

            # do the update on parameters
            f_update(lrate)

            ud = time.time() - ud_start

            # check for bad numbers, usually we remove non-finite elements
            # and continue training - but not done here
            if numpy.isnan(cost) or numpy.isinf(cost):
                print 'NaN detected'
                return 1., 1., 1.

            # verbose
            if numpy.mod(uidx, dispFreq) == 0:
                print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud

            # save the best model so far
            if numpy.mod(uidx, saveFreq) == 0:
                print 'Saving...',

                if best_p is not None:
                    params = best_p
                else:
                    params = unzip(tparams)
                numpy.savez(saveto, history_errs=history_errs, **params)
                pkl.dump(model_options, open('%s.pkl' % saveto, 'wb'))
                print 'Done'

            # generate some samples with the model and display them
            if numpy.mod(uidx, sampleFreq) == 0:
                # FIXME: random selection?
                for jj in xrange(numpy.minimum(5, x.shape[1])):
                    stochastic = True
                    sample, score = gen_sample(tparams,
                                               f_init,
                                               f_next,
                                               x[:, jj][:, None],
                                               model_options,
                                               trng=trng,
                                               k=1,
                                               maxlen=30,
                                               stochastic=stochastic,
                                               argmax=False)
                    print 'Source ', jj, ': ',
                    for vv in x[:, jj]:
                        if vv == 0:
                            break
                        if vv in worddicts_r[0]:
                            print worddicts_r[0][vv],
                        else:
                            print 'UNK',
                    print
                    print 'Truth ', jj, ' : ',
                    for vv in y[:, jj]:
                        if vv == 0:
                            break
                        if vv in worddicts_r[1]:
                            print worddicts_r[1][vv],
                        else:
                            print 'UNK',
                    print
                    print 'Sample ', jj, ': ',
                    if stochastic:
                        ss = sample
                    else:
                        score = score / numpy.array([len(s) for s in sample])
                        ss = sample[score.argmin()]
                    for vv in ss:
                        if vv == 0:
                            break
                        if vv in worddicts_r[1]:
                            print worddicts_r[1][vv],
                        else:
                            print 'UNK',
                    print

            # validate model on validation set and early stop if necessary
            if numpy.mod(uidx, validFreq) == 0:
                use_noise.set_value(0.)
                valid_errs = pred_probs(f_log_probs, prepare_data,
                                        model_options, valid)
                valid_err = valid_errs.mean()
                history_errs.append(valid_err)

                if uidx == 0 or valid_err <= numpy.array(history_errs).min():
                    best_p = unzip(tparams)
                    bad_counter = 0
                if len(history_errs) > patience and valid_err >= \
                        numpy.array(history_errs)[:-patience].min():
                    bad_counter += 1
                    if bad_counter > patience:
                        print 'Early Stop!'
                        estop = True
                        break

                if numpy.isnan(valid_err):
                    ipdb.set_trace()

                print 'Valid ', valid_err

            # finish after this many updates
            if uidx >= finish_after:
                print 'Finishing after %d iterations!' % uidx
                estop = True
                break

        print 'Seen %d samples' % n_samples

        if estop:
            break

    if best_p is not None:
        zipp(best_p, tparams)

    use_noise.set_value(0.)
    valid_err = pred_probs(f_log_probs, prepare_data, model_options,
                           valid).mean()

    print 'Valid ', valid_err

    params = copy.copy(best_p)
    numpy.savez(saveto,
                zipped_params=best_p,
                history_errs=history_errs,
                **params)

    return valid_err
Example No. 33
def main():

    """
    Initialize environments
    """
    m = 1
    mass_factor = 2.1
    l = 1
    length_factor = 1.1
    b = 0.15
    g = 9.81
    dt = 0.005
    goal = np.array([[np.pi],[0]])
    x_limits = np.array([0, 6.2832])
    numPointsx = 51
    dx = (x_limits[-1] - x_limits[0])/(numPointsx - 1)
    x_dot_limits = np.array([-6.5, 6.5])
    numPointsx_dot = 81
    dx_dot = (x_dot_limits[-1] - x_dot_limits[0])/(numPointsx_dot - 1)
    Q = np.array([[40, 0], [0, 0.02]])
    R = 0.2
    test_policies = False
    environment = pendulum(m, l, b, g, dt, goal, x_limits, dx, x_dot_limits, dx_dot, Q, R)
    environment_target = pendulum(m*mass_factor, l*length_factor, b, g, dt, goal, x_limits, dx, x_dot_limits, dx_dot, Q, R)

    """
    Learn an initial policy and value function
    """
    gamma = 0.99
    x_grid = np.linspace(x_limits[0], x_limits[1], numPointsx)
    x_dot_grid = np.linspace(x_dot_limits[0], x_dot_limits[1], numPointsx_dot)
    u_limits = np.array([-25,25])
    numPointsu = 121
    u_grid = np.linspace(u_limits[0], u_limits[1], numPointsu)
    num_iterations = 600

    code_dir = os.path.dirname(os.path.realpath(__file__))
    data_dir = '../data/GPSARSA'
    data_dir = os.path.join(code_dir, data_dir)

    print('Value Iteration for target domain')
    target_file = 'data_m_%.2f_l_%.2f.pkl'%(m*mass_factor, l*length_factor)
    fileFound = False
    for root, dirs, files in os.walk(data_dir):
        for file in files:
            if (file.endswith('.pkl') and file==target_file):
                fileFound = True
                print('Relevant pre-computed data found!')
                data = pkl.load(open(os.path.join(data_dir, target_file), 'rb'))
                policy_target = data[0]
                V_target = data[1]
    if (not fileFound):
        policy_target, V_target = ValueIterationSwingUp(environment_target, gamma, x_grid, x_dot_grid, u_grid, num_iterations)
        pkl.dump((policy_target, V_target), open(os.path.join(data_dir, target_file), 'wb'))

    print('Value Iteration in simulation')
    start_file = 'data_m_%.2f_l_%.2f.pkl'%(m, l)
    fileFound = False
    for root, dirs, files in os.walk(data_dir):
        for file in files:
            if (file.endswith('.pkl') and file==start_file):
                fileFound = True
                print('Relevant pre-computed data found!')
                data = pkl.load(open(os.path.join(data_dir, start_file), 'rb'))
                policy_start = data[0]
                V_start = data[1]
    if (not fileFound):
        policy_start, V_start = ValueIterationSwingUp(environment, gamma, x_grid, x_dot_grid, u_grid, num_iterations)
        pkl.dump((policy_start, V_start), open(os.path.join(data_dir, start_file), 'wb'))
    
    V_target = np.reshape(V_target, (numPointsx, numPointsx_dot))
    V_start = np.reshape(V_start, (numPointsx, numPointsx_dot))
    policy_target = np.reshape(policy_target, (numPointsx, numPointsx_dot))
    policy_start = np.reshape(policy_start, (numPointsx, numPointsx_dot))

    """
    Test learned policies
    """
    if (test_policies):
        
        policy_start_ = RegularGridInterpolator((x_grid, x_dot_grid), policy_start)
        dyn_start = lambda t,s: environment_target.dynamics_continuous(s, policy_start_)
        int_start = ode(dyn_start).set_integrator('vode', method='bdf', with_jacobian=False)
        int_start.set_initial_value(np.array([[0], [0]]), 0)
        t_final = 10
        trajectory_start = np.empty((2, int(t_final/dt)))
        num_steps = 0
        while int_start.successful() and int_start.t<t_final:
            int_start.integrate(int_start.t+dt)
            trajectory_start[:, num_steps] = int_start.y[:,0]
            num_steps+=1

        trajectory_start = trajectory_start[:,0:num_steps]
        plt.plot(trajectory_start[0,:], trajectory_start[1,:])
        plt.scatter(np.pi, 0, c='red', marker='o')
        plt.xlabel('theta')
        plt.ylabel('theta-dot')
        plt.title('Bootstrapped Policy')
        plt.show()

        policy_target_ = RegularGridInterpolator((x_grid, x_dot_grid), policy_target)
        dyn_target = lambda t,s: environment_target.dynamics_continuous(s, policy_target_)
        int_target = ode(dyn_target).set_integrator('vode', method='bdf', with_jacobian=False)
        int_target.set_initial_value(np.array([[0], [0]]), 0)
        trajectory_target = np.empty((2, int(t_final/dt)))
        num_steps = 0
        while int_target.successful() and int_target.t<t_final:
            int_target.integrate(int_target.t+dt)
            trajectory_target[:, num_steps] = int_target.y[:,0]
            num_steps+=1

        trajectory_target = trajectory_target[:,0:num_steps]
        plt.plot(trajectory_target[0,:], trajectory_target[1,:])
        plt.scatter(np.pi, 0, c='red', marker='o')
        plt.xlabel('theta')
        plt.ylabel('theta-dot')
        plt.title('Target Policy')
        plt.show()
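
        # For reference, the same closed-loop rollout can also be written
        # against the newer scipy.integrate.solve_ivp interface; a minimal
        # sketch, assuming dynamics_continuous(s, policy) returns the state
        # derivative exactly as it is used above.
        def rollout_ivp(policy_interp, s0, t_final=10.0):
            from scipy.integrate import solve_ivp
            dyn = lambda t, s: np.ravel(
                environment_target.dynamics_continuous(s[:, np.newaxis], policy_interp))
            sol = solve_ivp(dyn, (0.0, t_final), s0,
                            t_eval=np.arange(0.0, t_final, dt), method='BDF')
            return sol.y  # rows are theta and theta-dot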

    """
    GPSARSA
    """
    sigma0 = 0.2
    # sigmaf = 13.6596
    # sigmal = np.array([[0.5977],[1.9957],[5.7314]])
    sigmaf = 16.8202
    sigmal = np.array([[1.3087],[2.9121],[9.6583],[7.0756]])
    nu = (sigmaf**2)*(np.exp(-1)-0.36)
    epsilon = 0.1
    max_episode_length = 1000
    num_episodes = 500

    kernel = SqExpArd(sigmal, sigmaf)
    states = np.mgrid[x_grid[0]:(x_grid[-1]+dx):dx, x_dot_grid[0]:(x_dot_grid[-1] + dx_dot):dx_dot]
    states = np.concatenate((np.reshape(states[0,:,:], (1,states.shape[1]*states.shape[2])),\
                    np.reshape(states[1,:,:], (1,states.shape[1]*states.shape[2]))), axis=0)
    V_mu = lambda s: RegularGridInterpolator((x_grid, x_dot_grid), V_start)(s.T)
    Q_mu = buildQfromV(V_mu, environment, gamma, states, u_grid[np.newaxis,:]) # Q_mu is number_of_actions x number_of_states
    Q_mu = np.reshape(Q_mu.T, (numPointsx, numPointsx_dot, numPointsu))
    Q_mu = RegularGridInterpolator((x_grid, x_dot_grid, u_grid), Q_mu)
    Q_mu_ = lambda s,a: Q_mu(np.concatenate((s,a + 0.0001*(a[0,:]<=u_grid[0]) - 0.0001*(a[0,:]>=u_grid[-1])), axis=0).T)[:,np.newaxis]
    policy_start_ = RegularGridInterpolator((x_grid, x_dot_grid), policy_start)
    policy_prior = lambda s: policy_start_(s.T)[:,np.newaxis]
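
    # buildQfromV (defined elsewhere) amounts to a one-step lookahead,
    # Q(s, a) = r(s, a) + gamma * V(s'); a sketch of that idea, assuming a
    # vectorized env.step(s, a) -> (s_next, reward) -- not necessarily the
    # exact routine used here.
    def build_q_from_v_sketch(V_fn, env, discount, S, A):
        Q = np.zeros((A.shape[1], S.shape[1]))
        for i in range(A.shape[1]):
            a = np.repeat(A[:, i:i + 1], S.shape[1], axis=1)
            s_next, reward = env.step(S, a)
            Q[i, :] = reward + discount * np.ravel(V_fn(s_next))
        return Q  # num_actions x num_states, matching Q_mu above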

    gpsarsa = GPSARSA(environment_target, u_limits[np.newaxis,:], nu, sigma0, gamma, epsilon, kernel, Q_mu_, policy_prior)
    print('GPSARSA.. ')
    gpsarsa.build_policy_monte_carlo(num_episodes, max_episode_length)
    V_gpsarsa = gpsarsa.get_value_function(states)
    V_gpsarsa = np.reshape(V_gpsarsa, (numPointsx, numPointsx_dot))
    print('Initial mean error:%f'%np.mean(np.abs(V_target - V_start)))
    print('Final mean error:%f'%np.mean(np.abs(V_target - V_gpsarsa)))
    set_trace()

    """
    Results
    """
    plt.subplot(3,1,1)
    plt.imshow(np.abs(V_target - V_start).T, aspect='auto',\
        extent=(x_limits[0], x_limits[1], x_dot_limits[1], x_dot_limits[0]), origin='upper')
    plt.ylabel('theta-dot')
    plt.xlabel('theta')
    plt.title('Initial Diff')
    plt.colorbar()

    plt.subplot(3,1,2)
    plt.imshow(np.abs(V_target - V_gpsarsa).T, aspect='auto',\
        extent=(x_limits[0], x_limits[1], x_dot_limits[1], x_dot_limits[0]), origin='upper')
    plt.ylabel('theta-dot')
    plt.xlabel('theta')
    plt.title('Final Diff')
    plt.colorbar()

    plt.subplot(3,1,3)
    plt.scatter(gpsarsa.D[0,:], gpsarsa.D[1,:], marker='o', c='red')
    plt.xlim(x_limits[0], x_limits[1])
    plt.xlabel('theta')
    plt.ylim(x_dot_limits[0], x_dot_limits[1])
    plt.ylabel('theta-dot')
    plt.title('Dictionary Points')

    resultDirName = 'GPSARSA_run'
    run = -1
    for root, dirs, files in os.walk(data_dir):
        for d in dirs:
            if (d.startswith(resultDirName)):
                extension = d.split(resultDirName)[-1]
                if (extension.isdigit() and int(extension)>=run):
                    run = int(extension)
    run += 1
    saveDirectory = os.path.join(data_dir, resultDirName + str(run))
    os.mkdir(saveDirectory)
    dl.dump_session(filename=os.path.join(saveDirectory, 'session_%d'%num_episodes))
    plt.savefig(os.path.join(saveDirectory,'V_Diff.png'))
    plt.show()
    set_trace()
Ejemplo n.º 34
0
def train_loop(args, train_loader, val_loader, writer):
    vocab = utils.load_vocab(args.vocab_json)
    program_generator, pg_kwargs, pg_optimizer = None, None, None
    execution_engine, ee_kwargs, ee_optimizer = None, None, None
    baseline_model, baseline_kwargs, baseline_optimizer = None, None, None
    baseline_type = None

    pg_best_state, ee_best_state, baseline_best_state = None, None, None

    # Set up model
    optim_method = getattr(torch.optim, args.optimizer)

    ##### Modified #####
    scene2action_model, scene2action_optimizer = None, None

    s2a_kwargs = {
        'feat_dim': args.scene2action_feat_dim,
        'hidden_dim': args.scene2action_hidden_dim,
        'action_dim': args.scene2action_action_dim,
        'dropout': args.scene2action_dropout,
        'word_vocab_size': args.scene2action_word_vocab_size,
        'word_embed_size': args.scene2action_word_embed_size,
        'lstm_hidden_size': args.scene2action_lstm_hidden_size,
        'lstm_num_layers': args.scene2action_lstm_num_layers,
    }

    scene2action_model = Scene2Action(**s2a_kwargs)
    scene2action_model.cuda()
    scene2action_model.train()
    scene2action_optimizer = optim_method(scene2action_model.parameters(),
                                          lr=args.learning_rate,
                                          weight_decay=args.weight_decay)

    print("Here is the scene to action network: ")
    print(scene2action_model)

    if args.model_type in ['FiLM', 'PG', 'PG+EE']:
        program_generator, pg_kwargs = get_program_generator(args)
        pg_optimizer = optim_method(program_generator.parameters(),
                                    lr=args.learning_rate,
                                    weight_decay=args.weight_decay)
        print('Here is the conditioning network:')
        print(program_generator)
    if args.model_type in ['FiLM', 'EE', 'PG+EE']:
        execution_engine, ee_kwargs = get_execution_engine(args)
        ee_optimizer = optim_method(execution_engine.parameters(),
                                    lr=args.learning_rate,
                                    weight_decay=args.weight_decay)
        print('Here is the conditioned network:')
        print(execution_engine)
    if args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
        baseline_model, baseline_kwargs = get_baseline_model(args)
        params = baseline_model.parameters()
        if args.baseline_train_only_rnn == 1:
            params = baseline_model.rnn.parameters()
        baseline_optimizer = optim_method(params,
                                          lr=args.learning_rate,
                                          weight_decay=args.weight_decay)
        print('Here is the baseline model')
        print(baseline_model)
        baseline_type = args.model_type

    loss_fn = torch.nn.CrossEntropyLoss().cuda()

    ##### Modified #####
    loss_s2a = torch.nn.CrossEntropyLoss().cuda()

    ##### Modified #####
    stats = {
        'train_losses': [],
        'train_rewards': [],
        'train_losses_ts': [],
        'train_accs': [],
        'val_accs': [],
        'val_accs_ts': [],
        'best_val_acc': -1,
        'model_t': 0,
        'train_losses_s2a': [],
    }
    t, epoch, reward_moving_average = 0, 0, 0

    ##### Modified #####
    set_mode('train', [
        scene2action_model, program_generator, execution_engine, baseline_model
    ])

    print('train_loader has %d samples' % len(train_loader.dataset))
    print('val_loader has %d samples' % len(val_loader.dataset))

    num_checkpoints = 0
    epoch_start_time = 0.0
    epoch_total_time = 0.0
    train_pass_total_time = 0.0
    val_pass_total_time = 0.0
    running_loss = 0.0

    while t < args.num_iterations:
        if (epoch > 0) and (args.time == 1):
            epoch_time = time.time() - epoch_start_time
            epoch_total_time += epoch_time
            print(
                colored(
                    'EPOCH PASS AVG TIME: ' + str(epoch_total_time / epoch),
                    'white'))
            print(colored('Epoch Pass Time      : ' + str(epoch_time),
                          'white'))
        epoch_start_time = time.time()

        epoch += 1

        print('Starting epoch %d' % epoch)

        for batch in train_loader:

            t += 1
            print("Current batch " + str(t))

            ##### Modified #####
            questions, _, feats, feats_aux, answers, programs, _ = batch
            if isinstance(questions, list):
                questions = questions[0]
            questions_var = Variable(questions.cuda())

            feats_var = Variable(feats.cuda())
            ##### Modified ####
            feats_var_aux = Variable(feats_aux.cuda())
            answers_var = Variable(answers.cuda())

            #print("answers var 0 " + str(answers_var.size(0))) ### 64

            if programs[0] is not None:
                programs_var = Variable(programs.cuda())

            reward = None

            ##### Modified #####---------------------------------------------------------
            ### Preprocess batch started ###

            # For each data in the current batch (step-by-step)

            avg_action_length = 0.0
            total_repeated_rate = 0.0
            count_turn = 0

            for turn in range(feats_var.size(0)):
                #print("Current turn " + str(turn)) #------------------------------------------------------------------------------------#

                set_mode('eval', [program_generator, execution_engine])
                current_action = -1
                current_count = 0
                current_scene = feats_var[turn]
                current_question = questions_var[turn]
                current_answer = answers_var[turn]
                actions = []
                step_losses = []  # per-step losses (negated rewards) for the return
                step_logits = []  # per-step action log-probabilities, kept on the autograd graph

                while ((current_action != args.end_action_index)
                       and (current_count < args.maximum_action_number)):
                    current_count += 1
                    current_action, action_propability = scene2action_model(
                        current_scene, current_question)
                    actions.append(current_action.item())
                    #print("Current_action " + str(current_action))
                    #print("action_propability " + str(action_propability))

                    #print("current action after " + str(current_action))
                    if (current_action != args.end_action_index):
                        if (current_action == 0):
                            current_scene = current_scene + feats_var[turn]
                        else:
                            current_scene = current_scene + feats_var_aux[
                                turn][current_action - 1]  ##### To check #####

                    temp_question = current_question.view(
                        1, -1).clone()  ##### To check #####
                    programs_pred = program_generator(temp_question)
                    current_scene = current_scene.view(
                        1, 256, 16, 16)  ##### To check #####
                    scores = execution_engine(current_scene, programs_pred)

                    current_answer = current_answer.view(
                        -1)  ##### To check #####
                    loss_current = loss_fn(scores, current_answer)

                    # Keep the per-step loss (a negated reward) and the action
                    # log-probability; the log-probability must stay on the
                    # autograd graph so the policy gradient can reach
                    # scene2action_model (copying through numpy would detach it).
                    step_losses.append(loss_current.detach().cpu().view(1, 1))
                    step_logits.append(action_propability.view(1, 1))

                Minus_reward_currentturn = torch.cat(step_losses, 0)
                Logits_currentturn = torch.cat(step_logits, 0).cuda()

                vt = decay_normalize_loss(Minus_reward_currentturn,
                                          args.scene2action_gamma, args)

                vt = vt.cuda()

                ##### Modified #####
                ### Add route loss here ### route_loss_rate
                loss_current_s2a = torch.mean(Logits_currentturn * vt)
                scene2action_optimizer.zero_grad()
                loss_current_s2a.backward()
                scene2action_optimizer.step()

                feats_var[turn] = current_scene

                avg_action_length = avg_action_length + len(actions)

                repeated_actions = 0
                for a1 in range(len(actions) - 1):
                    for a2 in range(a1 + 1, len(actions)):
                        if actions[a1] == actions[a2]:
                            repeated_actions = repeated_actions + 1
                            break
                total_repeated_rate = total_repeated_rate + (
                    repeated_actions) / (float)(len(actions))
                count_turn = count_turn + 1

            writer.add_scalar('scene2action_loss', loss_current_s2a.item(), t)

            ### Add avg length, no_repeated rate
            writer.add_scalar('avg_route_length',
                              avg_action_length / (float)(count_turn), t)
            writer.add_scalar('avg_repeat_rate',
                              total_repeated_rate / (float)(count_turn), t)

            ### Turn actions to image ###
            image_ = torch.zeros(3, 13, 13)
            print("actions ----------------------- ")
            print(actions)

            for ii_ in range(len(actions)):
                image_[0][actions[ii_]][ii_] = 0
                image_[1][actions[ii_]][ii_] = 1
                image_[2][actions[ii_]][ii_] = 1

            writer.add_image('selected_viewpoints', image_, t)

            set_mode('train', [
                scene2action_model, program_generator, execution_engine,
                baseline_model
            ])
            #exit()
            ### Preprocess batch ended ###
            ##### Modified #####---------------------------------------------------------

            if args.model_type == 'PG':
                # Train program generator with ground-truth programs
                pg_optimizer.zero_grad()
                loss = program_generator(questions_var, programs_var)
                loss.backward()
                pg_optimizer.step()
            elif args.model_type == 'EE':
                # Train execution engine with ground-truth programs
                ee_optimizer.zero_grad()
                scores = execution_engine(feats_var, programs_var)
                loss = loss_fn(scores, answers_var)
                loss.backward()
                ee_optimizer.step()
            elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
                baseline_optimizer.zero_grad()
                baseline_model.zero_grad()
                scores = baseline_model(questions_var, feats_var)
                loss = loss_fn(scores, answers_var)
                loss.backward()
                baseline_optimizer.step()
            elif args.model_type == 'PG+EE':
                programs_pred = program_generator.reinforce_sample(
                    questions_var)
                scores = execution_engine(feats_var, programs_pred)

                loss = loss_fn(scores, answers_var)
                _, preds = scores.data.cpu().max(1)
                raw_reward = (preds == answers).float()
                reward_moving_average *= args.reward_decay
                reward_moving_average += (
                    1.0 - args.reward_decay) * raw_reward.mean()
                centered_reward = raw_reward - reward_moving_average

                if args.train_execution_engine == 1:
                    ee_optimizer.zero_grad()
                    loss.backward()
                    ee_optimizer.step()

                if args.train_program_generator == 1:
                    pg_optimizer.zero_grad()
                    program_generator.reinforce_backward(
                        centered_reward.cuda())
                    pg_optimizer.step()
            elif args.model_type == 'FiLM':
                if args.set_execution_engine_eval == 1:
                    set_mode('eval', [execution_engine])
                programs_pred = program_generator(questions_var)
                scores = execution_engine(feats_var, programs_pred)
                loss = loss_fn(scores, answers_var)

                pg_optimizer.zero_grad()
                ee_optimizer.zero_grad()
                if args.debug_every <= -2:
                    pdb.set_trace()
                loss.backward()
                if args.debug_every < float('inf'):
                    check_grad_num_nans(execution_engine, 'FiLMedNet')
                    check_grad_num_nans(program_generator, 'FiLMGen')

                if args.train_program_generator == 1:
                    if args.grad_clip > 0:
                        torch.nn.utils.clip_grad_norm(
                            program_generator.parameters(), args.grad_clip)
                    pg_optimizer.step()
                if args.train_execution_engine == 1:
                    if args.grad_clip > 0:
                        torch.nn.utils.clip_grad_norm(
                            execution_engine.parameters(), args.grad_clip)
                    ee_optimizer.step()

            if t % args.record_loss_every == 0:
                running_loss += loss.data.item()
                avg_loss = running_loss / args.record_loss_every
                print(t, avg_loss)
                stats['train_losses'].append(avg_loss)
                stats['train_losses_ts'].append(t)
                ##### Modified #####
                stats['train_losses_s2a'].append(loss_current_s2a.item())
                if reward is not None:
                    stats['train_rewards'].append(reward)
                running_loss = 0.0
            else:
                running_loss += loss.data.item()

            writer.add_scalar('train_losses', loss.data.item(), t)
            if t % args.checkpoint_every == 0:
                num_checkpoints += 1
                print('Checking training accuracy ... ')
                start = time.time()
                """
        ##### Modified #####
        train_acc = check_accuracy(args, scene2action_model, program_generator, execution_engine,
                                   baseline_model, train_loader)
        if args.time == 1:
          train_pass_time = (time.time() - start)
          train_pass_total_time += train_pass_time
          print(colored('TRAIN PASS AVG TIME: ' + str(train_pass_total_time / num_checkpoints), 'red'))
          print(colored('Train Pass Time      : ' + str(train_pass_time), 'red'))
        print('train accuracy is', train_acc)
        print('Checking validation accuracy ...')
        start = time.time()
        ##### Modified #####
        """
                val_acc = check_accuracy(args, scene2action_model,
                                         program_generator, execution_engine,
                                         baseline_model, val_loader)
                if args.time == 1:
                    val_pass_time = (time.time() - start)
                    val_pass_total_time += val_pass_time
                    print(
                        colored(
                            'VAL PASS AVG TIME:   ' +
                            str(val_pass_total_time / num_checkpoints),
                            'cyan'))
                    print(
                        colored('Val Pass Time        : ' + str(val_pass_time),
                                'cyan'))
                print('val accuracy is ', val_acc)

                ### Val_acc
                writer.add_scalar('val_acc', val_acc, t)
                """
        stats['train_accs'].append(train_acc)
        stats['val_accs'].append(val_acc)
        stats['val_accs_ts'].append(t)

        #if val_acc > stats['best_val_acc']:
        stats['best_val_acc'] = val_acc
        stats['model_t'] = t
        """
                ##### Modified #####
                best_scene2action_state = get_state(scene2action_model)
                best_pg_state = get_state(program_generator)
                best_ee_state = get_state(execution_engine)
                best_baseline_state = get_state(baseline_model)

                ##### Modified #####
                checkpoint = {
                    'args': args.__dict__,
                    'scene2action_kwargs': s2a_kwargs,
                    'scene2action_state': best_scene2action_state,
                    'program_generator_kwargs': pg_kwargs,
                    'program_generator_state': best_pg_state,
                    'execution_engine_kwargs': ee_kwargs,
                    'execution_engine_state': best_ee_state,
                    'baseline_kwargs': baseline_kwargs,
                    'baseline_state': best_baseline_state,
                    'baseline_type': baseline_type,
                    'vocab': vocab
                }
                for k, v in stats.items():
                    checkpoint[k] = v
                print('Saving checkpoint to %s' % args.checkpoint_path)
                torch.save(checkpoint,
                           args.checkpoint_path + '_' + str(t) + '.pt')
                ##### Modified #####
                del checkpoint['scene2action_state']
                del checkpoint['program_generator_state']
                del checkpoint['execution_engine_state']
                del checkpoint['baseline_state']
                #with open(args.checkpoint_path + '.json', 'w') as f:
                #  json.dump(checkpoint, f)

            if t == args.num_iterations:
                break
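
decay_normalize_loss above is not shown; a common implementation of the discounted, normalized return it appears to compute looks like this (a sketch under that assumption, not necessarily the exact routine used here):

import torch

def decay_normalize(step_losses, gamma):
    # step_losses: (T, 1) tensor of per-action losses (negated rewards),
    # as accumulated in the turn loop above.
    returns = torch.zeros_like(step_losses)
    running = 0.0
    for t in reversed(range(step_losses.size(0))):
        running = step_losses[t, 0] + gamma * running
        returns[t, 0] = running
    # Normalize to zero mean / unit variance for a lower-variance gradient.
    return (returns - returns.mean()) / (returns.std(unbiased=False) + 1e-8)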
Ejemplo n.º 35
0
def estimate_value_fn(pomdp, s, h_BeliefNode, steps):
    import ipdb
    ipdb.set_trace()
    print(s)
Ejemplo n.º 36
0
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')

for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.

ipdb.set_trace()
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard 'encoder_outputs' and only keep the states.
encoder_states = [state_h, state_c]

# Set up the decoder, using 'encoder_states' as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
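
Following the standard Keras character-level seq2seq recipe this snippet is based on, the decoder outputs then feed a softmax over target tokens and everything is assembled into one trainable model; a sketch of the remaining steps (the optimizer, batch size and epoch count here are placeholders):

from keras.layers import Dense
from keras.models import Model

decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=64, epochs=10, validation_split=0.2)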
Ejemplo n.º 37
0
def main():
    from PIL import Image
    import matplotlib.pyplot as plt
    '''
    # nsynth dataset
    dir = "nsynth/nsynth-test/"
    dataset_test = nsynth(dir, fb=True)
    print ("Getting nsynth data...")
    batch = dataset_test.returnInstance(100)
    images = [ plotSpectrumBW(image) for image in batch['fft'] ]

    #import ipdb; ipdb.set_trace()
    for x in range(0, len(images)):
      if batch['fb'][x] == 1:
        img = Image.fromarray(images[x], 'L')
        img.show()
        
    '''
    # Feedback data
    feedback_files = glob.glob("feedback/*.csv")
    dataset_fb = feedback(feedback_files, self_sample=True, testing=True)
    unprocessed = True # Return wav or not
    print ("Getting feedback data...")
    feedbacks = dataset_fb.returnInstance(100, unprocessed=unprocessed)
    
    ffts = feedbacks['fft']
    if unprocessed: # Set up audio output
        from multiprocessing import Queue, Process
        
        # Launch wav reader with indicator queue
        filename_queue = Queue()
        queuedone = Queue()
        wav = Process(target=wav_player,
                      args=[filename_queue,
                            queuedone],
                      daemon = True)
        wav.start()
    
    # Play/show
    try:
        while feedbacks is not None:
            for x in range(0, len(feedbacks[list(feedbacks.keys())[0]])):
                if feedbacks['fb'][x] == 1:
                    
                    # Get frequencies directly from dataset
                    #indices = ap.freq_to_idx(feedbacks['freqs'][x], bins)
                    #indices = np.asarray(indices)
                    
                    # Get frequencies from freq vector and inferred freq ranges
                    # Calc freq ranges
                    bins = [feedbacks['max'][x]/(ap.HEIGHT-1) * n for n in range(0, ap.HEIGHT)]
                    bins = np.asarray(bins)
                    
                    # Match what model is outputting
                    vector = np.asarray([feedbacks['freqs_vector'][x]])
                    vector = ap.vector_resize(vector, 42)
                    vector = ap.vector_resize(vector, ffts[x].shape[0])
                    
                    # Get indices of freqs and match to image indices
                    indices = ap.vector_to_idx(vector)
                    indices = len(ffts[x]) - indices[0] - 1
                    
                    if len(indices) == 0:
                        import ipdb;ipdb.set_trace()
                        
                    # Draw
                    if len(indices) != 0:
                        ffts[x][indices, 0:5] = 255
                    # Display FFTs
                    plt.imshow(ffts[x])
                    plt.draw(); plt.pause(0.1)
                    
                    # Print wav files
                    wav_file = feedbacks['wav'][x].split('/')[-1]
                    beg = str(feedbacks['beg'][x])
                    print(wav_file+" "+beg)
                    
                    ### Play audio
                    if unprocessed:
                        instance = feedbacks['audio'][x]
                        if feedbacks['sample_rate'][x] != REF_RATE:
                            # Convert fb sample rate to match instances's
                            instance = audioop.ratecv(
                                feedbacks['audio'][x],          # input
                                feedbacks['audio'][x].itemsize, # bit depth (bytes)
                                1, feedbacks['sample_rate'][x],       # channels, inrate
                                REF_RATE,             # outrate
                                None)                 # state..?
                            instance = np.frombuffer(instance[0], dtype=np.int16)
                            
                        # Write to temp wav file
                        scipy.io.wavfile.write('temp.wav', REF_RATE, instance)
                        
                        # Printouts per sample
                        #print(feedbacks['wav'][x], feedbacks['beg'][x])
                        time_amplitude = sum(abs(instance))/len(instance)
                        
                        if float("-inf") in ffts[x] or float("+inf") in ffts[x]:
                            print("inf")
                        else:
                            fft_time_samples = len(ffts[x][0])
                            total_fft_volume = sum(sum(abs(ffts[x])))
                            print(int(total_fft_volume/fft_time_samples), # FFT Volume
                                  int(time_amplitude))                    # Time Volume
                        
                        # Play audio and wait for finish
                        filename_queue.put("temp.wav")
                        done = queuedone.get()
            
            # Get next batch
            feedbacks = dataset_fb.returnInstance(100, unprocessed=unprocessed)
            ffts = feedbacks['fft']
    except KeyboardInterrupt: import ipdb;ipdb.set_trace()
    
    # End playback
    filename_queue.put(None)
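
wav_player itself is defined elsewhere; the queue protocol above (filename in, done-token out, None as the shutdown sentinel) suggests a worker along these lines. A sketch only: simpleaudio is a stand-in for whatever playback backend the project actually uses.

import simpleaudio as sa

def wav_player(filename_queue, queuedone):
    while True:
        filename = filename_queue.get()
        if filename is None:  # shutdown sentinel, as sent above
            break
        wave = sa.WaveObject.from_wave_file(filename)
        wave.play().wait_done()  # block until playback finishes
        queuedone.put(True)      # let the producer continue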
Ejemplo n.º 38
0
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    device = t.device('cuda') if opt.gpu else t.device('cpu')
    if opt.vis:
        from visualize import Visualizer
        vis = Visualizer(opt.env)

    # Data
    transforms = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = tv.datasets.ImageFolder(opt.data_path, transform=transforms)
    dataloader = t.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=opt.num_workers,
                                         drop_last=True
                                         )

    # Networks
    netg, netd = NetG(opt), NetD(opt)
    map_location = lambda storage, loc: storage
    if opt.netd_path:
        netd.load_state_dict(t.load(opt.netd_path, map_location=map_location))
    if opt.netg_path:
        netg.load_state_dict(t.load(opt.netg_path, map_location=map_location))
    # Move the networks onto the GPU, if there is one
    netd.to(device)
    netg.to(device)

    # Define the optimizers and the loss
    optimizer_g = t.optim.Adam(netg.parameters(), opt.lr1, betas=(opt.beta1, 0.999))
    optimizer_d = t.optim.Adam(netd.parameters(), opt.lr2, betas=(opt.beta1, 0.999))
    # binary cross entropy
    criterion = t.nn.BCELoss().to(device)

    # Real images get label 1, fake images label 0; labels are arrays of length batch_size
    # noises is the input to the generator
    true_labels = t.ones(opt.batch_size).to(device)
    fake_labels = t.zeros(opt.batch_size).to(device)
    fix_noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)
    noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)

    errord_meter = AverageValueMeter()
    errorg_meter = AverageValueMeter()

    epochs = range(opt.max_epoch)
    for epoch in iter(epochs):
        for ii, (img, _) in tqdm.tqdm(enumerate(dataloader)):
            real_img = img.to(device)

            if ii % opt.d_every == 0:
                # Train the discriminator
                optimizer_d.zero_grad()
                ## Try to classify the real images as real
                output = netd(real_img)
                # Feed in the real images and compute the loss against the true labels
                error_d_real = criterion(output, true_labels)
                error_d_real.backward()

                ## Try to classify the fake images as fake; note the fake images here are produced from noise by the generator!!!
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises).detach()  # generate fake images from the noise
                output = netd(fake_img)
                error_d_fake = criterion(output, fake_labels)
                error_d_fake.backward()

                # Only step the optimizer after both losses have been backpropagated
                # (the optimizer picks up the grads stored on the parameters).
                # !!! Important: only the discriminator's parameters are updated here;
                # the generator's weights are not touched!
                optimizer_d.step()

                error_d = error_d_fake + error_d_real

                errord_meter.add(error_d.item())

            if ii % opt.g_every == 0:
                # Train the generator
                optimizer_g.zero_grad()
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises)
                output = netd(fake_img)
                # The noise becomes a fake image through the generator, and that image
                # then tries to get itself classified as real.
                # Note, though, that only the generator's parameters are optimized here,
                # never the discriminator's.
                error_g = criterion(output, true_labels)
                error_g.backward()
                optimizer_g.step()
                errorg_meter.add(error_g.item())

            if opt.vis and ii % opt.plot_every == opt.plot_every - 1:
                ## Visualization
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()
                fix_fake_imgs = netg(fix_noises)
                vis.images(fix_fake_imgs.detach().cpu().numpy()[:64] * 0.5 + 0.5, win='fixfake')
                vis.images(real_img.data.cpu().numpy()[:64] * 0.5 + 0.5, win='real')
                vis.plot('errord', errord_meter.value()[0])
                vis.plot('errorg', errorg_meter.value()[0])

        if (epoch + 1) % opt.save_every == 0:
            # Save the model and sample images
            tv.utils.save_image(fix_fake_imgs.data[:64], '%s/%s.png' % (opt.save_path, epoch), normalize=True,
                                range=(-1, 1))
            t.save(netd.state_dict(), 'checkpoints/netd_%s.pth' % epoch)
            t.save(netg.state_dict(), 'checkpoints/netg_%s.pth' % epoch)
            errord_meter.reset()
            errorg_meter.reset()
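
The os.path.exists(opt.debug_file) / ipdb.set_trace() pattern above is a handy way to drop into the debugger mid-training: touch the file to break on the next plotting step, delete it to run free. A minimal reusable helper (the default path is just an illustration):

import os
import ipdb

def break_if_touched(path='/tmp/debug'):
    # Enter ipdb iff a sentinel file exists; create or delete the file at
    # runtime to toggle debugging without restarting the training job.
    if os.path.exists(path):
        ipdb.set_trace()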
Ejemplo n.º 39
0
 def forward(self, x):
     import ipdb
     ipdb.set_trace()
     return x
Ejemplo n.º 40
0
    def __init__(self, data, variables, population_size, model_names, root_dir,
                 max_cost, scenario):
        """
        data - full dataset
        variables - variables allowed to be used/dropped from models
        population_size - size of the population
        model_names - models to be used for evaluating each solution
        root_dir - directory where the per-generation report is written
        max_cost - maximum allowed cost for a solution
        scenario - one of 'simple_ml' or 'process_ml'
        """
        random.seed()

        for key in data:
            temp_data = [
                x for x in data[key]
                if x[formats.DATE].month > 3 and x[formats.DATE].month < 11
            ]
            data[key] = temp_data

        self._data = data
        self._variables = variables

        # fix missing data points
        ml_data = []

        desired_vars = copy.deepcopy(self._variables)
        if scenario == 'simple_ml':
            desired_vars.append(formats.DW_PLANT)
        elif scenario == 'process_ml':
            pass
        else:
            raise Exception("Scenario %s is not supported" % scenario)

        for entry in self._data['ml_data']:
            add = True
            for key in desired_vars:
                if entry[key] is None:
                    add = False

            if add:
                ml_data.append(entry)

        self._data['ml_data'] = ml_data

        self._population = []
        self._population_size = population_size
        self._model_names = model_names
        self._max_cost = max_cost
        self._max_cost_str = "{:.2f}".format(self._max_cost)
        self._stop_file = os.path.join(os.environ['HOME'], '.ga_signals',
                                       self._max_cost_str)

        self._best_models = []  # to be populated on each generation

        # model parameters
        self._keep_rate = 0.2
        self._cross_rate = 0.6
        self._immigration_rate = 0.2
        self._mutation_rate = 0.2

        if self._population_size == 10:
            self._probabilities = cdf(0.2, 1.2, population_size)  # for 10
        elif self._population_size == 25:
            self._probabilities = cdf(0.2, 1.2485, population_size)
        elif self._population_size == 50:
            self._probabilities = cdf(0.2, 1.249995, population_size)  # for 50
        else:
            import ipdb
            ipdb.set_trace()

        # set up report file
        self._root_dir = root_dir
        self._report_file = os.path.join(root_dir, 'report.csv')
        self._report_header = ['generation', 'pheno_cost']
        self._report_header += model_names
        self._report_header.append('total')

        self._ram_file = os.path.join(root_dir, 'ram.csv')
        CSVFileWriter(self._ram_file, [], ['date', 'ram'])

        CSVFileWriter(self._report_file, [], self._report_header)

        logging.info("Calculating max RMSEs")
        self._max_rmse = self._get_max_rmse()
        self.run_model()
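
cdf(...) above is assumed to return cumulative selection probabilities over the ranked population; under that assumption, drawing a parent reduces to a searchsorted lookup (a sketch, not the project's actual selection code):

import numpy as np

def select_index(cumulative_probabilities):
    # Draw a population index from cumulative selection probabilities,
    # e.g. the self._probabilities built in __init__ above.
    cum = np.asarray(cumulative_probabilities)
    return int(np.searchsorted(cum, np.random.random()))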
Ejemplo n.º 41
0
 def to_representation(self, instance):
     import ipdb
     ipdb.set_trace()
Ejemplo n.º 42
0
    def _handle_irsb(self):
        if o.BREAK_SIRSB_START in self.state.options:
            import ipdb
            ipdb.set_trace()

        # finish the initial setup
        self._prepare_temps(self.state)

        # handle the statements
        try:
            self._handle_statements()
        except (SimSolverError, SimMemoryAddressError):
            l.warning("%s hit an error when analyzing statements",
                      self,
                      exc_info=True)

        # some finalization
        self.num_stmts = len(self.irsb.statements)
        self.state.scratch.stmt_idx = self.num_stmts

        # If there was an error, and not all the statements were processed,
        # then this block does not have a default exit. This can happen if
        # the block has an unavoidable "conditional" exit or if there's a legitimate
        # error in the simulation
        self.default_exit = None
        if self.has_default_exit:
            l.debug("%s adding default exit.", self)

            self.next_expr = translate_expr(self.irsb.next, self.last_imark,
                                            self.num_stmts, self.state)
            self.state.log.extend_actions(self.next_expr.actions)

            if o.CODE_REFS in self.state.options:
                target_ao = SimActionObject(self.next_expr.expr,
                                            reg_deps=self.next_expr.reg_deps(),
                                            tmp_deps=self.next_expr.tmp_deps())
                self.state.log.add_action(
                    SimActionExit(self.state,
                                  target_ao,
                                  exit_type=SimActionExit.DEFAULT))

            self.default_exit = self.add_successor(self.state,
                                                   self.next_expr.expr,
                                                   self.default_exit_guard,
                                                   self.irsb.jumpkind)

            if o.FRESHNESS_ANALYSIS in self.state.options:
                # Note: only the default exit will have ignored_variables member.
                self.default_exit.scratch.update_ignored_variables()
        else:
            l.debug("%s has no default exit", self)

        # do return emulation and calless stuff
        successors = self.successors
        all_successors = self.successors + self.unsat_successors
        self.successors = []
        for exit_state in successors:

            if o.FRESHNESS_ANALYSIS in self.state.options:
                # Note: only the default exit will have ignored_variables member.
                self.default_exit.scratch.update_ignored_variables()

            self.successors.append(exit_state)

        for exit_state in all_successors:
            if o.CALLLESS in self.state.options and exit_state.scratch.jumpkind == "Ijk_Call":
                exit_state.registers.store(
                    exit_state.arch.ret_offset,
                    exit_state.se.Unconstrained('fake_ret_value',
                                                exit_state.arch.bits))

                exit_state.scratch.target = exit_state.se.BVV(
                    self.addr + self.irsb.size, exit_state.arch.bits)
                exit_state.scratch.jumpkind = "Ijk_Ret"

                exit_state.regs.ip = exit_state.scratch.target

            elif o.DO_RET_EMULATION in exit_state.options and exit_state.scratch.jumpkind == "Ijk_Call":
                l.debug("%s adding postcall exit.", self)

                ret_state = exit_state.copy()
                guard = ret_state.se.true if o.TRUE_RET_EMULATION_GUARD in self.state.options else ret_state.se.false
                target = ret_state.se.BVV(self.addr + self.irsb.size,
                                          ret_state.arch.bits)
                if ret_state.arch.call_pushes_ret:
                    ret_state.regs.sp = ret_state.regs.sp + ret_state.arch.bytes
                self.add_successor(ret_state, target, guard, 'Ijk_FakeRet')

        if o.BREAK_SIRSB_END in self.state.options:
            import ipdb
            ipdb.set_trace()
Ejemplo n.º 43
0
if args.edg_decoder:
    for p in edg_decoder.parameters():
        total_parameter += np.prod(p.size())
if args.cor_decoder:
    for p in cor_decoder.parameters():
        total_parameter += np.prod(p.size())
if args.type_decoder:
    for p in type_decoder.parameters():
        total_parameter += np.prod(p.size())
print('pytorch model parameters num:', total_parameter)

assert torch_pretrained.shape[0] >= total_parameter, 'not enough weights to load'
if torch_pretrained.shape[0] > total_parameter:
    print('Note: fewer parameters than pretrained weights !!!')
    import ipdb as pdb
    pdb.set_trace()
else:
    print('Number of parameters match!')


# Copying parameters
def copy_params(idx, parameters):
    with torch.no_grad():  # in-place copies into leaf tensors need no autograd
        for p in parameters:
            layer_p_num = np.prod(p.size())
            p.view(-1).copy_(
                torch.FloatTensor(torch_pretrained[idx:idx + layer_p_num]))
            idx += layer_p_num
            print('copy pointer current position: %d' % idx, end='\r', flush=True)
    return idx
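
The returned index threads a cursor through the flat pretrained weight vector, so modules must be consumed in exactly the order their parameters were counted; a sketch of the intended usage, reusing the decoder flags from above:

idx = 0
if args.edg_decoder:
    idx = copy_params(idx, edg_decoder.parameters())
if args.cor_decoder:
    idx = copy_params(idx, cor_decoder.parameters())
if args.type_decoder:
    idx = copy_params(idx, type_decoder.parameters())
# The cursor should land exactly at the end once every counted module is copied.
assert idx <= total_parameter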

Ejemplo n.º 44
0
def vector_ana(data, mask, dir_='vector', normalize=False, debug=False):
    # data_source = ['lsr', 'mesh', 'ncbigene', 'ndc', 'omim', 'orphanet', 'sabiork', 'sgd', 'sider', 'swdf',
    #            'affymetrix', 'dbsnp', 'gendr', 'goa', 'linkedgeodata', 'linkedspl', 'dbpedia']
    data_source = [
        'ncbigene', 'ndc', 'orphanet', 'sgd', 'sider', 'swdf', 'affymetrix',
        'goa', 'linkedgeodata', 'dbpedia.3.5.1.log', 'access.log-20151025',
        'access.log-20151124', 'access.log-20151126', 'access.log-20151213',
        'access.log-20151230', 'access.log-20160117', 'access.log-20160212',
        'access.log-20160222', 'access.log-20160301', 'access.log-20160303',
        'access.log-20160304', 'access.log-20160314', 'access.log-20160411'
    ]
    dbpedia = False if mask <= 8 else True
    query2text = sessions2Query2Text(data)

    query2vector = read_pkl(
        os.path.join(dir_, f'{data_source[mask]}_Vector.pkl'))
    confusionMtrix_dataset = []

    for index, sess in tqdm(enumerate(data), total=len(data), leave=True):

        session = sess['queries']
        session_len = sess['session_length']
        flag = 0
        infos = []
        for ith in range(session_len):
            queryi = session[ith]['index_in_file']
            texti = session[ith]['query_content']
            try:
                infoi = GetInfo(texti)
                infos.append(infoi)
            except Exception:
                flag = 1
                break
        if flag:
            continue

        if normalize:
            maximum = np.zeros(10)
            for ith1 in range(session_len):
                query1 = session[ith1]['index_in_file']
                vector1 = query2vector[query1]
                for i, num in enumerate(vector1):
                    if num > maximum[i]:
                        maximum[i] = num
                if debug:
                    print(vector1)
            maximum = np.where(maximum == 0, 1, maximum)
        if debug and normalize:
            print(maximum)
            from ipdb import set_trace
            set_trace()

        mat_kl = np.zeros((session_len, session_len))
        mat_cos = np.zeros((session_len, session_len))
        for ith1 in range(session_len):
            for ith2 in range(session_len):
                key = 'index_in_file'
                query1 = session[ith1][key]
                query2 = session[ith2][key]
                vector1 = query2vector[query1]
                vector2 = query2vector[query2]
                if debug:
                    print('before normalize')
                    print(vector1)
                    print(vector2)
                if normalize:
                    vector1 = vector1 / maximum
                    vector2 = vector2 / maximum
                if debug:
                    print('after')
                    print(vector1)
                    print(vector2)
                    from ipdb import set_trace
                    set_trace()
                mat_kl[ith1][ith2] = kl_divergence(vector1, vector2)
                mat_cos[ith1][ith2] = cosine_distance(vector1, vector2)
        confusionMtrix_dataset.append({
            'index': index,
            'mat_kl': mat_kl,
            'mat_cos': mat_cos
        })

    marker = '_normalized' if normalize else ''
    write_pkl(
        os.path.join(dir_, f'{data_source[mask]}_confusionMat{marker}.pkl'),
        confusionMtrix_dataset)
    return confusionMtrix_dataset
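
kl_divergence and cosine_distance are imported from elsewhere; minimal versions consistent with how they are called above would look like this (sketches; the KL one assumes nonnegative feature vectors and normalizes them into distributions):

import numpy as np

def kl_divergence(p, q, eps=1e-12):
    p = np.asarray(p, dtype=float) + eps
    q = np.asarray(q, dtype=float) + eps
    p, q = p / p.sum(), q / q.sum()
    return float(np.sum(p * np.log(p / q)))

def cosine_distance(u, v, eps=1e-12):
    u, v = np.asarray(u, dtype=float), np.asarray(v, dtype=float)
    return 1.0 - float(np.dot(u, v) /
                       (np.linalg.norm(u) * np.linalg.norm(v) + eps))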
Ejemplo n.º 45
0
def split_data_from_table(table_fname,
                          n_samples=N_SAMPLES,
                          n_features=N_FEATURES):

    h5 = ta.openFile(table_fname, mode='r')
    table = h5.root.input_output_data.readout

    l_features = table.cols.features
    l_index = table.cols.frame_index
    l_labels = table.cols.label
    import ipdb
    ipdb.set_trace()

    n_samples_total = len(l_labels)
    assert (2 * n_samples < n_samples_total)

    import warnings
    warnings.warn(
        """have something that takes a split depending on classes to keep it balanced"""
    )

    #TODO: have a balanced split on each class
    ind_total = sp.random.permutation(n_samples_total)
    ind_train = ind_total[:n_samples]
    ind_test = ind_total[n_samples:2 * n_samples]
    assert not sp.array([(ind_train == test).any() for test in ind_test]).any()
    print("checked that train and test do not overlap")
    """
    features_train = features.T[ind_train, :n_features]
    labels_train = labels[ind_train]
    features_test = features.T[ind_test, :n_features]
    labels_test = labels[ind_test]
    """

    features_train = sp.zeros((n_samples, n_features), dtype='uint8')
    features_test = sp.zeros((n_samples, n_features), dtype='uint8')
    labels_train = []
    labels_test = []

    pbar = start_progressbar(len(ind_train),
                             '%i train features' % (len(ind_train)))
    for i, ind in enumerate(ind_train):
        features_train[i] = l_features[ind][:n_features]
        labels_train.append(l_labels[ind])
        update_progressbar(pbar, i)
    end_progressbar(pbar)

    pbar = start_progressbar(len(ind_test),
                             '%i test features' % (len(ind_test)))
    for i, ind in enumerate(ind_test):
        features_test[i] = l_features[ind][:n_features]
        labels_test.append(l_labels[ind])
        update_progressbar(pbar, i)
    end_progressbar(pbar)

    labels_train = sp.array(labels_train)
    labels_test = sp.array(labels_test)

    table.flush()
    h5.close()

    return features_train, labels_train, features_test, labels_test
Ejemplo n.º 46
0
def main():

    import ipdb
    ipdb.set_trace()

    print('simon')
Ejemplo n.º 47
0
    def pda_move(self, vals):
        import ipdb; ipdb.set_trace()
        restrict_package_id = vals.get('restrict_package_id', False) or False
        location_dest_id = vals.get('location_dest_id', False)
        result_package_id = vals.get('result_package_id', 0)
        create_new_result = False
        package_qty = vals.get('package_qty', False)
        product_uom_qty = vals.get('product_uom_qty', 0.00)
        message = ''
        if restrict_package_id and restrict_package_id == result_package_id and package_qty != product_uom_qty:
            return {'message': 'No puedes mover un paquete si no es completo', 'id': 0}
        if product_uom_qty <= 0:
            return {'message': 'No puedes mover una cantidad menor o igual a 0', 'id': 0}


        if result_package_id == 0:
            result_package_id = False
        elif result_package_id > 0 and result_package_id != restrict_package_id:
            result_package_id = self.env['stock.quant.package'].browse(result_package_id)
            if result_package_id.location_id and result_package_id.location_id.id != location_dest_id:
                return {'message': 'No puedes mover a un paquete que no está en la ubicación de destino', 'id': 0}
            result_package_id = result_package_id.id
        elif result_package_id < 0:
            create_new_result = True

        if not result_package_id:
            loc_dest = self.env['stock.location'].search_read([('id', '=', location_dest_id)], ['in_pack'], limit=1)
            if loc_dest:
                need_check = loc_dest[0]['in_pack']
                if need_check:
                    create_new_result = True

        if restrict_package_id:
            ## Source package
            restrict_package_id = self.env['stock.quant.package'].browse(restrict_package_id)
            restrict_lot_id = restrict_package_id.lot_id.id
            product_id = restrict_package_id.product_id
            if package_qty:
                product_uom_qty = restrict_package_id.package_qty
            location_id = restrict_package_id.location_id.id
            if not result_package_id and not create_new_result and package_qty:
                result_package_id = restrict_package_id.id

        else:
            ## No source package
            restrict_lot_id = vals.get('restrict_lot_id', False)
            product_id = self.env['product.product'].browse(vals.get('product_id', False))
            location_id = vals.get('location_id', False)


        if create_new_result:
            new_package = self.env['stock.quant.package'].create({})
            result_package_id = new_package.id
            message = "Se ha creado un nuevo paquete %s" % new_package.name

        vals = {
            'origin': 'PDA done: [%s]'%self.env.user.name,
            'restrict_package_id': restrict_package_id and restrict_package_id.id,
            'restrict_lot_id': restrict_lot_id,
            'product_id': product_id.id,
            'product_uom': product_id.uom_id.id,
            'product_uom_qty': product_uom_qty,
            'name': product_id.display_name,
            'location_id': location_id,
            'result_package_id': result_package_id,
            'location_dest_id': location_dest_id
        }
        print(vals)

        new_move = self.env['stock.move'].create(vals)
        if new_move:
            new_move.action_done()
            message = '%s Se ha creado: %s'%(message, new_move.display_name)
            return {'message': message, 'id': new_move.id}
        else:
            return {'message': 'Error al crear el movimiento', 'id': 0}
Ejemplo n.º 48
0
def evaluate_on_physionet_2019(dataset_name, model, split='validation'):
    if split == 'validation':
        dataset = tfds.load(dataset_name,
                            split=tfds.Split.VALIDATION,
                            as_supervised=True)
    elif split == 'testing':
        dataset = tfds.load(dataset_name,
                            split=tfds.Split.TEST,
                            as_supervised=True)
    else:
        raise ValueError()

    normalizer = Normalizer(dataset_name)
    task = DATASET_TO_TASK_MAPPING[dataset_name]

    def custom_batch_fn(index, inputs):
        ts, labels = inputs
        return expand_time_into_batch(index, ts, labels)

    def model_preprocessing(index, ts, labels):
        # Remove the padding again
        length = ts[-1]
        demo = ts[0]
        ts = (demo, ) + tuple(el[:length] for el in ts[1:-1]) + (length, )
        labels = labels[:length]
        normalized_ts, labels = \
            normalizer.get_normalization_fn()(ts, labels)
        prepro_fn = model.data_preprocessing_fn()
        if prepro_fn is None:
            return index, normalized_ts, labels
        return prepro_fn(normalized_ts, labels) + (index, )

    # Add id to elements in dataset, expand timepoints into fake batch
    # dimension. Remove it and apply model specific preprocessing on the
    # instances.
    preprocessed_expanded = dataset.enumerate().map(
        custom_batch_fn).unbatch().map(model_preprocessing)

    batched_dataset = preprocessed_expanded.padded_batch(
        32,
        get_output_shapes(preprocessed_expanded),
        padding_values=get_padding_values(
            get_output_types(preprocessed_expanded)),
        drop_remainder=False)

    predictions = []
    labels = []
    instance_ids = []
    for instance in tqdm(tfds.as_numpy(batched_dataset)):
        last_index = instance[0][-1] - 1
        instance_predictions = model.predict_on_batch(instance[0])
        instance_labels = instance[1]
        instance_ids.append(instance[2])
        batch_index = np.arange(len(instance_labels))
        predictions.append(instance_predictions[(batch_index, last_index)])
        labels.append(instance_labels[(batch_index, last_index)])
    import ipdb
    ipdb.set_trace()
    predictions = np.concatenate(predictions, axis=0)
    labels = np.concatenate(labels, axis=0)
    instance_ids = np.concatenate(instance_ids, axis=0).ravel()
    instances, indexes = np.unique(instance_ids, return_index=True)
    predictions = np.split(predictions, indexes[1:])
    labels = np.split(labels, indexes[1:])

    return {
        metric_name: metric_fn(labels, predictions)
        for metric_name, metric_fn in task.metrics.items()
    }
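
The unique/split idiom at the end groups the flat per-window predictions back into one array per instance; it relies on instance_ids arriving already grouped, which holds here because batches are enumerated in order. A small worked example of the idiom:

import numpy as np

instance_ids = np.array([7, 7, 7, 9, 9, 12])  # already grouped by instance
values = np.arange(6)
_, indexes = np.unique(instance_ids, return_index=True)  # first occurrence of each id
groups = np.split(values, indexes[1:])
# groups == [array([0, 1, 2]), array([3, 4]), array([5])]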
Ejemplo n.º 49
0
def test(model_path='models/model-61', video_feat_path=video_feat_path):
    train_data, test_data = get_video_data(video_data_path,
                                           video_feat_path,
                                           train_ratio=0.7)
    test_videos = test_data['video_path'].values
    test_captions = test_data['Description'].values
    ixtoword = pd.Series(np.load('./data/ixtoword.npy').tolist())

    test_videos_unique = list()
    test_captions_list = list()
    for (video, caption) in zip(test_videos, test_captions):
        if len(test_videos_unique) == 0 or test_videos_unique[-1] != video:
            test_videos_unique.append(video)
            test_captions_list.append([caption])
        else:
            test_captions_list[-1].append(caption)

    model = Video_Caption_Generator(dim_image=dim_image,
                                    n_words=len(ixtoword),
                                    dim_embed=dim_embed,
                                    dim_hidden=dim_hidden,
                                    batch_size=batch_size,
                                    encoder_max_sequence_length=encoder_step,
                                    decoder_max_sentence_length=decoder_step,
                                    bias_init_vector=None)

    tf_loss, tf_video, tf_video_mask, tf_obj_feats, tf_caption, tf_caption_mask, tf_probs = model.build_model(
        is_test=True)
    sess = tf.InteractiveSession()

    saver = tf.train.Saver()
    saver.restore(sess, model_path)

    scorer = Meteor()
    scorer_bleu = Bleu(4)
    GTS = defaultdict(list)
    RES = defaultdict(list)
    counter = 0

    for (vid, caption) in zip(test_videos_unique, test_captions_list):
        generated_sentence = gen_sentence(sess, tf_video, tf_video_mask,
                                          tf_obj_feats, tf_caption, vid,
                                          ixtoword, 1)
        #generated_sentence_test, weights = gen_sentence(
        #    sess, video_tf, video_mask_tf, caption_tf, vid, ixtoword, weights_tf, 0.3)

        print(vid, generated_sentence)
        #print generated_sentence_test
        #print caption

        GTS[str(counter)] = [{
            'image_id': str(counter),
            'cap_id': i,
            'caption': s
        } for i, s in enumerate(caption)]
        RES[str(counter)] = [{
            'image_id': str(counter),
            'caption': generated_sentence[:-2] + '.'
        }]

        #GTS[vid] = caption
        #RES[vid] = [generated_sentence[:-2] + '.']
        counter += 1

        #words = generated_sentence.split(' ')
        #fig = plt.figure()
        #for i in range(len(words)):
        #    w = weights[i]
        #    ax = fig.add_subplot(len(words), 1, i+1)
        #    ax.set_title(words[i])
        #    ax.plot(range(len(w)), [ww[0] for ww in w], 'b')
        #plt.show()

        ipdb.set_trace()

    tokenizer = PTBTokenizer()
    GTS = tokenizer.tokenize(GTS)
    RES = tokenizer.tokenize(RES)

    score, scores = scorer.compute_score(GTS, RES)
    print "METEOR", score
    score, scores = scorer_bleu.compute_score(GTS, RES)
    print "BLEU", score
Ejemplo n.º 50
0
import os

import yaml
from easydict import EasyDict as edict


class YamlParser(edict):
    """
    This is a yaml parser based on EasyDict.
    """
    def __init__(self, cfg_dict=None, config_file=None):
        if cfg_dict is None:
            cfg_dict = {}

        if config_file is not None:
            assert(os.path.isfile(config_file))
            with open(config_file, 'r') as fo:
                cfg_dict.update(yaml.safe_load(fo.read()))

        super(YamlParser, self).__init__(cfg_dict)

    
    def merge_from_file(self, config_file):
        with open(config_file, 'r') as fo:
            self.update(yaml.safe_load(fo.read()))

    
    def merge_from_dict(self, config_dict):
        self.update(config_dict)


def get_config(config_file=None):
    return YamlParser(config_file=config_file)


if __name__ == "__main__":
    cfg = YamlParser(config_file="../configs/yolov5.yaml")
    import ipdb; ipdb.set_trace()
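
A quick usage sketch for the parser above; the YAML content and keys are made up for illustration:

import os
import tempfile

yaml_text = "model:\n  name: yolov5s\n  conf_thres: 0.4\n"
with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
    f.write(yaml_text)

cfg = get_config(config_file=f.name)
cfg.merge_from_dict({"debug": True})
# EasyDict exposes nested keys as attributes.
print(cfg.model.name, cfg.model.conf_thres, cfg.debug)  # yolov5s 0.4 True
os.unlink(f.name)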
Example #51
0
 def test(self):
     import ipdb
     ipdb.set_trace()
     self.assertTrue(True)
Example #52
0
    def add_transition_features(self, sequence, pos, y, y_prev, features):
        """ Adds a feature to the edge feature list.
        Creates a unique id if it's the first time the feature is visited,
        or returns the existing id otherwise.
        """
        assert pos < len(sequence.x), pdb.set_trace()

        # Get label name from ID.
        y_name = sequence.sequence_list.y_dict.get_label_name(y)
        # Get previous label name from ID.
        y_prev_name = sequence.sequence_list.y_dict.get_label_name(y_prev)
        # Generate feature name.
        feat_name = "prev_tag::%s::%s" % (y_prev_name, y_name)

        if self.add_features and any([
                y_name == '**' and y_prev_name == '**',
                y_name == 'B' and y_prev_name == '_STOP_',
        ]):
            print("FEATURE ERROR")
            ipdb.set_trace()

        # Get feature ID from name.
        feat_id = self.add_feature(feat_name)
        # Append feature.
        if (feat_id != -1):
            features.append(feat_id)

        #################################################################
        # TRANSITION_2 + WORD_2, + POS_2
        word, low_word, pos_tag, stem = self.get_label_names(sequence, pos)
        word1, low_word1, pos_1_tag, stem1 = self.get_label_names(
            sequence, pos - 1)

        feat_name = "TRANS_WORD::%s::%s_%s::%s" % (y_name, y_prev_name,
                                                   low_word, low_word1)
        # Get feature ID from name.
        feat_id = self.add_feature(feat_name)
        # Append feature.
        if feat_id != -1:
            features.append(feat_id)

        feat_name = "TRANS_POS::%s::%s_%s::%s" % (y_name, y_prev_name, pos_tag,
                                                  pos_1_tag)
        # Get feature ID from name.
        feat_id = self.add_feature(feat_name)
        # Append feature.
        if feat_id != -1:
            features.append(feat_id)

        feat_name = "TRANS_STEM::%s::%s_%s::%s" % (y_name, y_prev_name, stem,
                                                   stem1)
        # Get feature ID from name.
        feat_id = self.add_feature(feat_name)
        # Append feature.
        if feat_id != -1:
            features.append(feat_id)

        #############################################################################
        """
        ###############################
        ## Y,Y -> ORT,ORT
        # WINDOW ORTOGRAPHIC FEATURES
        rare_ort = True
        feat_ort_1 = ""
        if word1 in [START,BR,END,RARE]:
            feat_ort_1 = word1
        else:
            for i, pat in enumerate(ORT):
                if pat.search(word1):
                    rare_ort = False
                    feat_ort_1 = DEBUG_ORT[i]
        if rare_ort:
            feat_ort_1 = "OTHER_ORT"
        feat_name = "TRANS_ORT:%s::%s_%s::%s" % (y_name,y_1_name,feat_ort,feat_ort_1)
        # Get feature ID from name.
        feat_id = self.add_feature(feat_name)
        # Append feature.
        if feat_id != -1:
            features.append(feat_id)
        """

        return features
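
self.add_feature is not shown in this snippet; a minimal sketch of the name-to-id bookkeeping the docstring describes, assuming a dict plus list and the add_features flag used above (attribute names are assumptions):

def add_feature(self, feat_name):
    # Return the existing id, or assign a fresh one the first time
    # the feature name is seen.
    if feat_name in self.feature_dict:
        return self.feature_dict[feat_name]
    if not self.add_features:       # dictionary frozen, e.g. at test time
        return -1                   # matches the -1 sentinel checked above
    feat_id = len(self.feature_list)
    self.feature_dict[feat_name] = feat_id
    self.feature_list.append(feat_name)
    return feat_id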
Example #53
0
def main():
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    sess = tf.Session()

    #saver = tf.train.import_meta_graph(META_PATH)
    #saver.restore(sess, CKPT_PATH)
    #sanity_check(sess)

    # Initialize an image with random noise
    image_init = tf.random_uniform_initializer(
        minval=0,
        maxval=1,
    )
    # add regularization
    image_reg = tf.contrib.layers.l2_regularizer(
        scale=params['regularization'])
    # now define the image tensor using the initializer and the regularizer
    #t_input = tf.placeholder(np.float32, shape=(128,128,3), name='input') # define the input tensor

    image_shape = (1, 128, 128, 3)
    images = tf.get_variable("images",
                             image_shape,
                             initializer=image_init,
                             regularizer=image_reg)

    network = inference(images, 1.0, phase_train=True, weight_decay=0.0)
    pprint(network)
    ipdb.set_trace()
    layer = network[1]['conv1']

    # Create a saver for restoring variables
    #saver = tf.train.Saver(tf.global_variables())
    # Restore the parameters
    #saver.restore(sess, model_file)
    #saver = tf.train.import_meta_graph(META_PATH)
    #saver.restore(sess, CKPT_PATH)

    # ipdb.set_trace()
    #sess.run(tf.global_variables_initializer())
    #saver.restore(sess, CKPT_PATH)
    #layer_name = 'InceptionResnetV1/Repeat_2/block8_3/Conv2d_1x1/Conv2D'
    #layer = tf.get_default_graph().get_tensor_by_name('%s:0' % layer_name)

    #aver = tf.train.import_meta_graph(META_PATH)
    #saver.restore(sess, CKPT_PATH)
    # find the placeholders
    #graph = tf.get_default_graph()
    #placeholders = [op for op in graph.get_operations() if op.type == "Placeholder"]
    #ipdb.set_trace()

    #names = [n.name for n in tf.get_default_graph().as_graph_def().node]
    #bs_names = [name for name in names if "batch_size" in name]
    #pprint(bs_names)
    # ipdb.set_trace()

    total_reg = tf.reduce_sum(
        tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    loss = tf.negative(layer[:, :, :, channel]) + total_reg
    pprint(loss)

    vars_to_train = [
        var for var in tf.trainable_variables() if "images:0" == var.name
    ]
    optimizer = tf.train.AdamOptimizer(params['learning_rate'])
    train_op = optimizer.minimize(loss, var_list=vars_to_train)

    loss_list = list()
    image_list = list()

    sess.run(tf.global_variables_initializer())
    saver = tf.train.import_meta_graph(META_PATH)
    saver.restore(sess, CKPT_PATH)

    for step in range(params['steps']):
        loss_list.append(sess.run(loss))
        image_list.append(norm_image(sess.run(images)))
        sess.run(train_op)

    sanity_check(sess)

    print(len(image_list))  # image_list is a Python list; it has no .shape
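
norm_image is not defined in the snippet; a plausible sketch, assuming it just rescales the optimized image to [0, 1] for display:

import numpy as np

def norm_image(img):
    # Min-max rescale to [0, 1]; guard against a constant image.
    img = np.asarray(img, dtype=np.float32)
    rng = img.max() - img.min()
    return (img - img.min()) / rng if rng > 0 else np.zeros_like(img)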
Example #54
0
import argparse
import os
from ipdb import set_trace

from asteroid.data.wsj0_mix import create_wav_id_sample_count_list

parser = argparse.ArgumentParser()
parser.add_argument('base_path', type=str,\
        help='base path containing tr, tt and cv')
parser.add_argument('dest_folder', type=str,\
        help='Path to save the metadata')
args = parser.parse_args()

datasets = ['tr', 'cv', 'tt']

def create_meta_data(base_path, dest_folder):
    for _dataset in datasets:
        ds_base = os.path.join(base_path, _dataset, 'mix')
        meta_dest = os.path.join(dest_folder, _dataset+'.wavid.samples')
        if os.path.exists(meta_dest):
            print('{} already exists. Remove it and rerun'.format(meta_dest))
            exit(1)
        if not os.path.exists(dest_folder):
            os.makedirs(dest_folder)
        create_wav_id_sample_count_list(ds_base, meta_dest)



if __name__ == '__main__':
    set_trace()
    create_meta_data(args.base_path, args.dest_folder)
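
For reference, an equivalent command-line call; the script name and paths are illustrative:

# python make_wsj0mix_metadata.py /data/wsj0-mix/wav8k/min ./metadata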
Example #55
0
    def reset_model(self):
        if self._reset_mode == "free":
            qpos = self._last_qpos.copy()
            qvel = self.init_qvel.copy().squeeze()
        elif self._reset_mode == "random": # random puck pos + random arm angles
            qvel = self.init_qvel.copy().squeeze()
            qvel[self.QPOS_PUCK_INDS] = 0
            qvel[self.QPOS_GOAL_INDS] = 0

            qpos = self.init_qpos.copy()
            valid_arm_pos = False
            while (not valid_arm_pos):
                qpos[self.QPOS_JOINT_INDS] = np.random.uniform(
                    low=(-np.pi, -np.pi*3/4, -np.pi/2),
                    high=(np.pi, np.pi*3/4, np.pi/2)
                )
                self.set_state(np.array(qpos), np.array(qvel))
                eef_pos = self.get_body_com("distal_4")[:2]
                if np.all(np.abs(eef_pos) <= 0.9):
                    valid_arm_pos = True

            qpos[self.QPOS_PUCK_INDS] = np.random.uniform(
                low=(self._puck_initial_x_range[0],
                     self._puck_initial_y_range[0]),
                high=(self._puck_initial_x_range[1],
                      self._puck_initial_y_range[1])
                )

        elif self._reset_mode == "random_puck": # just randomize puck pos
            qpos = self.init_qpos.copy()

            qpos[self.QPOS_PUCK_INDS] = np.random.uniform(
                low=(self._puck_initial_x_range[0],
                     self._puck_initial_y_range[0]),
                high=(self._puck_initial_x_range[1],
                      self._puck_initial_y_range[1])
                )
            qvel = self.init_qvel.copy().squeeze()
            qvel[self.QPOS_PUCK_INDS] = 0
            qvel[self.QPOS_GOAL_INDS] = 0

        elif self._reset_mode == "distribution":
            num_init_states = self._init_states.shape[0]
            rand_index = np.random.randint(num_init_states)
            init_state = self._init_states[rand_index]

            qpos = self.init_qpos.copy()
            qpos[self.QPOS_JOINT_INDS] = np.arctan2(
                init_state[self.OBS_JOINT_SIN_INDS],
                init_state[self.OBS_JOINT_COS_INDS]
            )
            qpos[self.QPOS_PUCK_INDS] = init_state[self.OBS_PUCK_INDS]
            qvel = self.init_qvel.copy().squeeze()
        else:
            raise ValueError("reset mode must be specified correctly")

        if self._goal_mode == "random":
            if self._num_goals == 1:
                qpos[self.QPOS_GOAL_INDS] = self._goals[0]
            elif self._num_goals > 1:
                if self._swap_goal_upon_completion:
                    puck_position = self.get_body_com("puck")[:2]
                    goal_position = self.get_body_com("goal")[:2]
                    if np.linalg.norm(puck_position - goal_position) < 0.01:
                        other_goal_indices = [i for i in range(self._num_goals)
                                              if i != self._current_goal_index]
                        self._current_goal_index = np.random.choice(
                            other_goal_indices)
                else:
                    self._current_goal_index = np.random.randint(self._num_goals)
                    qpos[self.QPOS_GOAL_INDS] = self._goals[self._current_goal_index]
            else:
                qpos[self.QPOS_GOAL_INDS] = np.random.uniform(
                    low=(self._goal_x_range[0],
                         self._goal_y_range[0]),
                    high=(self._goal_x_range[1],
                          self._goal_y_range[1])
                )
        elif self._goal_mode == "curriculum-radius":
            self.goal_sampling_radius += self._goal_sampling_radius_increment
            puck_position = self.get_body_com("puck"[:2])
            bounds = np.array([puck_position - self.goal_sampling_radius,
                               puck_position + self.goal_sampling_radius])
            bounds = np.clip(bounds, -1, 1)

            goal = np.random.uniform(
                low=bounds[0, :], high=bounds[1, :]
            )
            from pprint import pprint; import ipdb; ipdb.set_trace(context=30)

            qpos[self.QPOS_GOAL_INDS] = goal
        else:
            raise ValueError("Invalid goal mode")
        # TODO: remnants from rllab -> gym conversion
        # qacc = np.zeros(self.sim.data.qacc.shape[0])
        # ctrl = np.zeros(self.sim.data.ctrl.shape[0])
        # full_state = np.concatenate((qpos, qvel, qacc, ctrl))
        if self._first_step:
            qpos[self.QPOS_JOINT_INDS] = np.array([np.pi/4, -np.pi/4, -np.pi/2])
            self._first_step = False

        self.set_state(np.array(qpos), np.array(qvel))

        return self._get_obs()
Example #56
0
def dir_from_event(event):
    if isinstance(event, FileMovedEvent):
        import ipdb
        ipdb.set_trace()
        return event.dest_path, get_dest_from_src(src=event.dest_path)
    return event.src_path, get_dest_from_src(src=event.src_path)
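
get_dest_from_src is not shown here; a minimal sketch, assuming a simple source-to-mirror directory mapping (both roots are hypothetical):

import os

SRC_ROOT = "/watched"    # hypothetical source root
DEST_ROOT = "/mirror"    # hypothetical destination root

def get_dest_from_src(src):
    # Re-root a path from SRC_ROOT to DEST_ROOT, preserving the relative layout.
    return os.path.join(DEST_ROOT, os.path.relpath(src, SRC_ROOT))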
Example #57
0
def debugger(type, flag):
    print('In debugger! (test_single.py)')
    import ipdb
    ipdb.set_trace()
Example #58
0
import os

from numpy import array, cos, dot, linspace, mean, pi, random, std, vstack
from scipy.optimize import fmin_bfgs
from matplotlib import pyplot

# kurt, negentropy, logcosh10/15/20, negexp, rotMat and PCA are project-local
# helpers that are not part of this snippet.


def main(addNoise=0, savedir=None, doFastICA=False):
    N = 200
    tt = linspace(0, 10, N)

    # make sources
    s1 = 4 + cos(tt * 5)
    s2 = tt % 2

    s1 -= mean(s1)
    s1 /= std(s1)
    s2 -= mean(s2)
    s2 /= std(s2)

    pyplot.figure(1)
    pyplot.subplot(4, 1, 1)
    pyplot.title('original sources')
    pyplot.plot(tt, s1, 'bo-')
    pyplot.subplot(4, 1, 2)
    pyplot.plot(tt, s2, 'bo-')

    A = array([[3, 1], [-2, .3]])

    S = vstack((s1, s2)).T
    #print('S', S)
    print('kurt(s1) =', kurt(s1))
    print('kurt(s2) =', kurt(s2))
    print(' negentropy(s1) =', negentropy(s1))
    print(' negentropy(s2) =', negentropy(s2))
    print(' logcosh10(s1) =', logcosh10(s1))
    print(' logcosh10(s2) =', logcosh10(s2))
    print(' logcosh15(s1) =', logcosh15(s1))
    print(' logcosh15(s2) =', logcosh15(s2))
    print(' logcosh20(s1) =', logcosh20(s1))
    print(' logcosh20(s2) =', logcosh20(s2))
    print(' negexp(s1) =', negexp(s1))
    print(' negexp(s2) =', negexp(s2))

    X = dot(S, A)

    if addNoise > 0:
        print('Adding noise!')
        X += random.normal(0, addNoise, X.shape)

    #print('X', X)

    x1 = X[:, 0]
    x2 = X[:, 1]

    #print('kurt(x1) =', kurt(x1))
    #print('kurt(x2) =', kurt(x2))

    pyplot.subplot(4, 1, 3)
    pyplot.title('observed signal')
    pyplot.plot(tt, x1, 'ro-')
    pyplot.subplot(4, 1, 4)
    pyplot.plot(tt, x2, 'ro-')

    pyplot.figure(2)
    pyplot.subplot(4, 1, 1)
    pyplot.title('original sources')
    pyplot.hist(s1)
    pyplot.subplot(4, 1, 2)
    pyplot.hist(s2)
    pyplot.subplot(4, 1, 3)
    pyplot.title('observed signal')
    pyplot.hist(x1)
    pyplot.subplot(4, 1, 4)
    pyplot.hist(x2)

    pca = PCA(X)

    #W = pca.toWhitePC(X)
    W = pca.toZca(X)

    w1 = W[:, 0]
    w2 = W[:, 1]

    print('kurt(w1) =', kurt(w1))
    print('kurt(w2) =', kurt(w2))

    pyplot.figure(3)
    pyplot.subplot(4, 2, 1)
    pyplot.title('observed signal')
    pyplot.hist(x1)
    pyplot.subplot(4, 2, 3)
    pyplot.hist(x2)
    pyplot.subplot(2, 2, 2)
    pyplot.plot(x1, x2, 'bo')

    pyplot.subplot(4, 2, 5)
    pyplot.title('whitened observed signal')
    pyplot.hist(w1)
    pyplot.subplot(4, 2, 7)
    pyplot.hist(w2)
    pyplot.subplot(2, 2, 4)
    pyplot.plot(w1, w2, 'bo')

    # Compute kurtosis at different angles
    thetas = linspace(0, pi, 100)
    kurt1 = 0 * thetas
    for ii, theta in enumerate(thetas):
        kurt1[ii] = kurt(dot(rotMat(theta)[0, :], W.T).T)

    # functions of data
    minfnK = lambda data: -kurt(data)**2
    minfnNEnt = lambda data: -negentropy(data)
    minfnLC10 = lambda data: -logcosh10(data)
    minfnLC15 = lambda data: -logcosh15(data)
    minfnLC20 = lambda data: -logcosh20(data)
    minfnNExp = lambda data: -negexp(data)

    # functions of the rotation angle, given W as the data
    minAngleFnK = lambda theta: minfnK(dot(rotMat(theta)[0, :], W.T).T)
    minAngleFnNEnt = lambda theta: minfnNEnt(dot(rotMat(theta)[0, :], W.T).T)
    minAngleFnLC10 = lambda theta: minfnLC10(dot(rotMat(theta)[0, :], W.T).T)
    minAngleFnLC15 = lambda theta: minfnLC15(dot(rotMat(theta)[0, :], W.T).T)
    minAngleFnLC20 = lambda theta: minfnLC20(dot(rotMat(theta)[0, :], W.T).T)
    minAngleFnNExp = lambda theta: minfnNExp(dot(rotMat(theta)[0, :], W.T).T)

    #########
    # Chosen objective function. Change this line to change which objective is used.
    #########
    minDataFn = minfnK

    minAngleFn = lambda theta: minDataFn(dot(rotMat(theta)[0, :], W.T).T)

    if doFastICA:
        # Use FastICA from sklearn
        #pdb.set_trace()
        from sklearn.decomposition import FastICA
        rng = random.RandomState(1)
        ica = FastICA(random_state=rng, whiten=False)
        ica.fit(W)
        Recon = ica.transform(W)  # Estimate the sources
        #S_fica /= S_fica.std(axis=0)   # (should already be done)
        Ropt = ica.mixing_  # attribute form of the long-removed get_mixing_matrix()
    else:
        # Manually fit angle using fmin_bfgs
        angle0 = 0
        xopt = fmin_bfgs(minAngleFn, angle0)
        xopt = xopt[0] % pi
        Ropt = rotMat(xopt)
        Recon = dot(W, Ropt.T)

    mnval = array([minAngleFn(aa) for aa in thetas])

    pyplot.figure(4)
    pyplot.title('objective vs. angle')
    #pyplot.plot(thetas, kurt1, 'bo-', thetas, mnval, 'k', xopt, minAngleFn(xopt), 'ko')
    pyplot.plot(thetas, mnval, 'b')
    if not doFastICA:
        # pyplot.hold() was removed from matplotlib; holding is now the default.
        pyplot.plot(xopt, minAngleFn(xopt), 'ko')

    pyplot.figure(5)
    pyplot.title('different gaussianness measures vs. angle')
    pyplot.subplot(6, 1, 1)
    pyplot.title('Kurt')
    pyplot.plot(thetas, array([minAngleFnK(aa) for aa in thetas]))
    pyplot.subplot(6, 1, 2)
    pyplot.title('NegEnt')
    pyplot.plot(thetas, array([minAngleFnNEnt(aa) for aa in thetas]))
    pyplot.subplot(6, 1, 3)
    pyplot.title('LogCosh10')
    pyplot.plot(thetas, array([minAngleFnLC10(aa) for aa in thetas]))
    pyplot.subplot(6, 1, 4)
    pyplot.title('LogCosh15')
    pyplot.plot(thetas, array([minAngleFnLC15(aa) for aa in thetas]))
    pyplot.subplot(6, 1, 5)
    pyplot.title('LogCosh20')
    pyplot.plot(thetas, array([minAngleFnLC20(aa) for aa in thetas]))
    pyplot.subplot(6, 1, 6)
    pyplot.title('NegExp')
    pyplot.plot(thetas, array([minAngleFnNExp(aa) for aa in thetas]))

    print('kurt(r1) =', kurt(Recon[:, 0]))
    print('kurt(r2) =', kurt(Recon[:, 1]))

    print()
    print('objective(s1) =', minDataFn(s1))
    print('objective(s2) =', minDataFn(s2))
    print('objective(w1) =', minDataFn(w1))
    print('objective(w2) =', minDataFn(w2))
    print('objective(r1) =', minDataFn(Recon[:, 0]))
    print('objective(r2) =', minDataFn(Recon[:, 1]))
    print('optimal theta:', end=' ')
    if doFastICA:
        print('<not computed with FastICA>')
    else:
        print(xopt, '(+pi/2 =', (xopt + pi / 2) % pi, ')')
    print('Optimal rotation matrix:\n', Ropt)

    pyplot.figure(6)
    pyplot.subplot(4, 1, 1)
    pyplot.title('original sources')
    pyplot.plot(tt, s1, 'bo-')
    pyplot.subplot(4, 1, 2)
    pyplot.plot(tt, s2, 'bo-')
    pyplot.subplot(4, 1, 3)
    pyplot.title('reconstructed sources')
    pyplot.plot(tt, Recon[:, 0], 'go-')
    pyplot.subplot(4, 1, 4)
    pyplot.plot(tt, Recon[:, 1], 'go-')

    #pyplot.show()

    if savedir:
        figname = lambda ii: os.path.join(savedir, 'figure_%02d.png' % ii)
        for ii in range(6):
            pyplot.figure(ii + 1)
            pyplot.savefig(figname(ii + 1))
        print('plots saved in', savedir)
    else:
        import ipdb
        ipdb.set_trace()
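
rotMat is used throughout but not defined in the snippet; a minimal sketch consistent with the rotMat(theta)[0, :] projections above:

from numpy import array, cos, sin

def rotMat(theta):
    # 2-D rotation matrix; row 0 is the unit vector the data is projected onto.
    return array([[cos(theta), -sin(theta)],
                  [sin(theta),  cos(theta)]])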
Example #59
0
 def colors(self, colors):
     # TODO Test me!
     import ipdb; ipdb.set_trace()
     for color, cell in zip(colors, self.cells):
         # self.fr_color.config(bg=color)
         cell.color = color
Example #60
0
 def _test_iterator_(self, mode, agg):
     coll = self.make_collection(agg=agg)
     for ii in coll:
         print(ii)
         import ipdb
         ipdb.set_trace()