Code example #1
 def __init__(self,params,out_path_orig,pymc_import=False,multinest_import=False):
     params.out_save_plots = True
     
     if params.gen_type == 'transmission' or params.fit_transmission:
         if os.path.isdir(os.path.join(out_path_orig, 'stage_0')):
             params.out_path = os.path.join(out_path_orig, 'stage_0')
         else:
             params.out_path = out_path_orig
                 
         dataob = data(params)
         atmosphereob = atmosphere(dataob)
         forwardmodelob = transmission(atmosphereob)
         fittingob = fitting(forwardmodelob)
         if params.mcmc_run and pymc_import:
             fittingob.MCMC = True
         if params.nest_run and multinest_import:
             fittingob.NEST = True
         outputob = output(fittingob)
         if params.verbose or params.out_save_plots:
             outputob.plot_all(save2pdf=params.out_save_plots, param_labels=fittingob.fit_params_texlabels)
         outputob.save_ascii_spectra()
         
         
     if params.gen_type == 'emission' or params.fit_emission:
         
         folders = ['stage_0', 'stage_1']
         for f in folders:
             dir = os.path.join(out_path_orig, f)
             if os.path.isdir(dir):
                 params.out_path = dir
                 dataob = data(params)
                 if f == 'stage_1':
                     Cov_array = np.loadtxt(os.path.join(out_path_orig, 'stage_0/tp_covariance.dat'))
                     atmosphereob = atmosphere(dataob, tp_profile_type='hybrid', covariance=Cov_array)
                 else:
                     atmosphereob = atmosphere(dataob) 
                 forwardmodelob = emission(atmosphereob)
                 fittingob = fitting(forwardmodelob)
                 if params.mcmc_run and pymc_import:
                     fittingob.MCMC = True
                 if params.nest_run and multinest_import:
                     fittingob.NEST = True
                 outputob = output(fittingob)
                 if params.verbose or params.out_save_plots:
                     outputob.plot_all(save2pdf=params.out_save_plots,
                                    params_names=fittingob.fit_params_names[:fittingob.fit_X_nparams],
                                    params_labels=fittingob.fit_params_texlabels[:fittingob.fit_X_nparams])
                 outputob.save_ascii_spectra()
                 # save and plot TP profile (plotting only if save2pdf=True)
                 outputob.save_TP_profile(save2pdf=True)  #saving TP profile
                 
                 # delete objects between iterations to free memory
                 del dataob, atmosphereob, forwardmodelob, fittingob, outputob
Code example #2
def plot():
    """Start function that is initiated from the plot button, and
        the function responds to POST request. The function takes inn
        x-feature,y-feature,errorplotting,and classifier type, from the
        user and does prediction, plots and displays the plot as an image
        on the web page, together with som user frendly information.
    Returns:
        - render_template() (function): A function that renders the
            web1.html page, load the start image (url), displays a list
            of possible classfiers, and features, displays the prediction
            accuracy, and the used classifier.
    """

    assert request.method == 'POST'  # Checks that the code is in POST request
    saveFile = f"{os.getcwd()}/static/diabetes/diabetes"
    tid = time.strftime("%Y-%m-%d_%H_%M_%S")
    saveFile += f"{tid}.png"

    clfs = ['KNN', 'Logistic Regression', 'Linear SVC', 'SVC']
    featureTypes = [
        'pregnant', 'glucose', 'pressure', 'triceps', 'insulin', 'mass',
        'pedigree', 'age', 'None'
    ]

    # Data
    df = readDataFromCSV("diabetes.csv")
    training, validation = data(df)

    # Access the form data:
    features = set(request.form.getlist("feat"))
    clf = request.form.get("classifiers")
    plot_error = request.form.get("plot_error")

    if ('None' in features):
        features.remove('None')
    featuresChoosen = list(features)
    if (len(features) == 2):
        newFile = f'/static/diabetes/diabetes{tid}.png'
        newName = f'diabetes{tid}'
    else:
        newName = 'start'
        newFile = '/static/diabetes/start.png'

    if (len(features) >= 2):
        x1 = features.pop()
        x2 = features.pop()
    else:
        x1 = features.pop()
        x2 = "mass"  # Dummy feature

    plot, accuracy = fitPredPlot(training, validation, x1, x2, plot_error, clf)
    plot.savefig(saveFile)
    return render_template('web1.html',
                           name=newName,
                           url=newFile,
                           z=accuracy,
                           classifiers=clfs,
                           clf=clf,
                           featureTypes=featureTypes,
                           features=featuresChoosen)
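The handler above leans on project helpers (readDataFromCSV, data, fitPredPlot) that are not shown. A minimal sketch of what the data(df) split step might look like, assuming pandas and an 80/20 shuffled split (the fraction and seed are illustrative assumptions, not the project's actual values):

import pandas as pd

def data(df, frac=0.8, seed=0):
    # shuffle the rows, then cut into (training, validation) frames
    shuffled = df.sample(frac=1.0, random_state=seed)
    cut = int(len(shuffled) * frac)
    return shuffled.iloc[:cut], shuffled.iloc[cut:]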
Code example #3
File: icpformat.py Project: reflectometry/osrefl
def plot(filename):
    """
    Read and print all command line arguments
    """
    import pylab

    canvas = pylab.gcf().canvas
    d = data(filename)
    if len(d.v.shape) > 2:
        pylab.gca().pcolormesh(d.v[0,:,:])
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.ylabel)
    elif len(d.v.shape) > 1:
        if filename.lower().endswith('bt4'):
            offset=1
        else:
            offset=0
        pylab.gca().pcolorfast(d.v[:,offset:])
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.ylabel)
    else:
        pylab.plot(d.x,d.v)
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.vlabel)
    pylab.show()
Code example #4
File: assignment10.py Project: ShixinLi/assignment10
def main():

	print '\nGenerating results. This will take a while, so please be patient. Thank you!!\n'

	initial_data = pd.read_csv('DOHMH_New_York_City_Restaurant_Inspection_Results.csv', dtype = 'unicode')
	df = data(initial_data).clean_data()

	# Question 4
	print '\nthe sum of the grades in NYC is: ' + str(sum_grades_NYC(df))

	print '\nthe sum of the grades in BRONX is: ' + str(sum_grades_BORO(df, 'BRONX'))
	print '\nthe sum of the grades in BROOKLYN is: ' + str(sum_grades_BORO(df, 'BROOKLYN'))
	print '\nthe sum of the grades in MANHATTAN is: ' + str(sum_grades_BORO(df, 'MANHATTAN'))
	print '\nthe sum of the grades in QUEENS is: ' + str(sum_grades_BORO(df, 'QUEENS'))
	print '\nthe sum of the grades in STATEN ISLAND is: ' + str(sum_grades_BORO(df, 'STATEN ISLAND'))

	# Question 5
	print '\n......Generating graphs......'

	generate_graph_NYC(df)

	generate_graph_BORO(df, 'BRONX')
	generate_graph_BORO(df, 'BROOKLYN')
	generate_graph_BORO(df, 'MANHATTAN')
	generate_graph_BORO(df, 'QUEENS')
	generate_graph_BORO(df, 'STATEN ISLAND')

	print '\ngraphs will be saved!'
Code example #5
 def __init__(self, flag=True):
     wx.Frame.__init__(self, None, size=(1000, 800), title='IEC104 Parser')
     self.menu = importerMenu()
     self.SetMenuBar(self.menu.importbar)
     self.first = 0
     self.flag = flag
     self.da = data()
     self.sp = wx.SplitterWindow(self)  # create a splitter window whose parent is the frame
     self.p1 = wx.Panel(self.sp, style=wx.SUNKEN_BORDER)  # create child panel p1
     #self.p1=top_panel()
     #self.p2=wx.Panel(self.sp,style=wx.SUNKEN_BORDER)  # create child panel p2
     self.p2 = top_panel(self.sp)
     self.p1.Hide()  # make sure the spare child panels are hidden
     self.p2.Hide()
     self.sp1 = wx.SplitterWindow(self.p1)  # create a sub-splitter window whose parent is p1
     self.box = wx.BoxSizer(wx.VERTICAL)  # create a vertical box sizer
     self.box.Add(self.sp1, 1, wx.EXPAND)  # let the sub-splitter expand to fill all of p1
     self.p1.SetSizer(self.box)
     self.p2.SetBackgroundColour("TURQUOISE")
     #self.p1_1 = wx.Panel(self.sp1, style=wx.SUNKEN_BORDER)  # create child panel p1_1 on the sub-splitter self.sp1
     #self.p1_2 = wx.Panel(self.sp1, style=wx.SUNKEN_BORDER)  # create child panel p1_2 on the sub-splitter self.sp1
     self.p1_1 = detail_panel(self.sp1)
     self.p1_2 = detail_panel(self.sp1)
     self.p1_1.Hide()
     self.p1_2.Hide()
     self.p1_1.SetBackgroundColour("#CCCCCC")
     self.p1_2.SetBackgroundColour("white")
     self.sp.SplitHorizontally(self.p2, self.p1, 0)
     self.sp1.SplitHorizontally(self.p1_1, self.p1_2, 0)
     self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBack)
Code example #6
def month_devide(database, month):
    name = ''
    time = ''
    so2 = 0
    co = 0
    o3 = 0
    no2 = 0
    pm10 = 0
    pm25 = 0
    cnt = 0
    for ele in database:
        if ele.month == month:
            time = ele.year + '-' + ele.month + '-' + ele.date + '-' + '평균'  # '평균' = 'average'
            name = ele.station
            so2 += ele.so2
            co += ele.co
            o3 += ele.o3
            no2 += ele.no2
            pm10 += ele.pm10
            pm25 += ele.pm25
            cnt += 1
    if cnt > 0:
        res = data(name, time, so2 / cnt, co / cnt, o3 / cnt, no2 / cnt,
                   pm10 / cnt, pm25 / cnt)
        return res
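month_devide above (and day_devide in a later example) read records carrying station, year, month, and date fields plus six pollutant readings, and build an averaged data record from them. A minimal sketch of the constructed record, with attribute names inferred from how the objects are consumed (an assumption; the records being read also carry the year/month/date fields not shown here):

class data:
    # one station's averaged readings for one period
    def __init__(self, station, time, so2, co, o3, no2, pm10, pm25):
        self.station = station
        self.time = time
        self.so2, self.co, self.o3 = so2, co, o3
        self.no2, self.pm10, self.pm25 = no2, pm10, pm25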
Code example #7
def wp_data():
    # Selenium setup
    url = 'https://www.winterparkresort.com/the-mountain/mountain-report'
    driver = webdriver.Chrome(options=options,
                              executable_path="c:\\chromedriver.exe")
    driver.get(url)

    # Grabbing Winter Park data via Xpath - selecting by id/class is tedious
    temp = driver.find_element_by_xpath(
        '//*[@id="mountainConditionsApp"]/div[2]/div[1]/div/div[3] \
            /div[2]/div[1]/div[1]/div[4]/div[1]/span').text
    depth_total = driver.find_element_by_xpath(
        '//*[@id="mountainConditionsApp"]/div[2]/div[1]/div/div[3] \
            /div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/span[1]').text
    depth_overnight = driver.find_element_by_xpath(
        '//*[@id="mountainConditionsApp"]/div[2]/div[1]/div/ \
            div[3]/div[2]/div[1]/div[2]/div[2]/div[3]/div[2]').text
    lifts = driver.find_element_by_xpath(
        '//*[@id="mountainConditionsApp"]/div[2]/div[2]/div/div[1]/div[2]/span'
    ).text
    trails = driver.find_element_by_xpath(
        '//*[@id="mountainConditionsApp"]/div[2]/div[2]/div/div[2]/div[2]/span'
    ).text

    driver.quit()

    # int conversion
    temp = int(temp.split('.', 1)[0])
    depth_total = int(depth_total.split('.', 1)[0])
    depth_overnight = int(depth_overnight.split('.', 1)[0])
    lifts = int(lifts)
    trails = int(trails)

    return data(temp, depth_total, depth_overnight, lifts, trails)
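wp_data above (and copper_data in a later example) assume a pre-configured ChromeOptions object named options and a chromedriver at c:\chromedriver.exe, and hand their five scraped values to a data container. One plausible minimal form, with illustrative field names:

from collections import namedtuple

# temperature, total/overnight snow depth, open lifts and trails
data = namedtuple('data', ['temp', 'depth_total', 'depth_overnight',
                           'lifts', 'trails'])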
Code example #8
    def get_data(self):
        value = ""

        for feature in self.__features:
            value = value + feature.get_feature_val() + "\n"

        return data(self.__name, value)
Code example #9
def dmft_data( niw, ntau, nk, beta, blocks = ['up'] ):
  dt = data() 
  dt.blocks = blocks 
  AddGfData(dt, ['G_imp_iw', 'G_loc_iw', 'Sigma_imp_iw', 'Gweiss_iw'], blocks, 1, niw, beta, domain = 'iw', suffix='', statistic='Fermion')
  AddGfData(dt, ['Sigma_imp_tau', 'Gweiss_tau'], blocks, 1, ntau, beta, domain = 'tau', suffix='', statistic='Fermion')
  dt.ks = numpy.linspace(0,2.0*numpy.pi, nk, endpoint=False)
  AddBlockNumpyData(dt, ['G0_k_iw', 'G_k_iw','G_r_iw','Sigma_k_iw','Sigma_r_iw'], blocks, (niw*2,nk,nk))
  AddBlockNumpyData(dt, ['epsilon_k'], blocks, (nk,nk))
  return dt  # return the assembled data container
Code example #10
def run(params, options=False):

    # initialising data object
    dataob = data(params)

    #initialising TP profile instance
    atmosphereob = atmosphere(dataob)

    #initialising transmission radiative transfer code instance
    forwardmodelob = transmission(atmosphereob)

    #initialising fitting object
    fittingob = fitting(forwardmodelob)

    #fit data
    if params.downhill_run:
        if MPIimport:
            if MPI.COMM_WORLD.Get_rank() == 0:
                fittingob.downhill_fit()  # simplex downhill fit, only on the first core
        else:
            fittingob.downhill_fit()  # simplex downhill fit

    if MPIimport:
        MPI.COMM_WORLD.Barrier()  # wait for everybody to synchronize here

    if params.mcmc_run and pymc_import:
        fittingob.mcmc_fit()  # MCMC fit
        if MPIimport:
            MPI.COMM_WORLD.Barrier()


    if (not options and params.nest_run and multinest_import) \
        or (params.nest_run and multinest_import and not options.no_multinest):
        fittingob.multinest_fit()  # Nested sampling fit
    elif options and (params.nest_run and multinest_import
                      and options.no_multinest):
        fittingob.NEST = True

    if params.nest_poly_run and polychord_import:
        fittingob.polychord_fit()  #Polychord sampling fit
        if MPIimport:
            MPI.COMM_WORLD.Barrier()  # wait for everybody to synchronize here

    # exit if the rank of the MPI process is > 0 (i.e. leave only the master process running)
    if MPIimport:
        MPIsize = MPI.COMM_WORLD.Get_size()
        if MPI.COMM_WORLD.Get_rank() > 0:
            sys.exit()
    else:
        MPIsize = 0

    # initiating output instance with fitted data from the fitting class
    # running interpolations with nthreads = MPIsize
    outputob = output(fittingob)

    return outputob
Code example #11
File: caller.py Project: Hugueskir/hellogear
def caller():

    # if __name__ == "__main__":
    userEvent = data()
    cal(userEvent)

    print('\n' * 5, "TODO:", '\n')
    print("try/except for HTTP errors; parse the errors and the calendar additions")
    print("authentication")
    print("run somewhere")
Code example #12
def main():
    #loading the dataset
    dataload = open("HC_Body_Temperature.txt", "r")
    dataload = dataload.read().splitlines()
    dataSet = np.ndarray(130, dtype=data)
    for i in range(0, 130):
        temperature, gender, heartrate = dataload[i].split()
        newData = data(float(temperature), float(heartrate), float(gender))
        dataSet[i] = newData
    test = np.full((3, 15), 0, float)
    j = 0
    Trainerrors = 0
    Testerrors = 0
    for p in [1, 2, np.inf]:
        print("---------------------------")
        print("p - ", p)
        for k in ([1, 3, 5, 7, 9]):
            for i in range(500):
                np.random.shuffle(dataSet)
                train_data = dataSet[:65]
                test_data = dataSet[65:]
                Trainerrors += knn(train_data, train_data, p, k)
                Testerrors += knn(train_data, test_data, p, k)
            test[0][j] = (Testerrors / 500) / 65
            if (p == np.inf):
                test[1][j] = -1
            else:
                test[1][j] = p
            test[2][j] = k
            j += 1
            print("********k = ", k, "******")
            print(" the errors of the test ", (Testerrors / 500) / 65, "%")
            print(" the errors of the train", (Trainerrors / 500) / 65, "%")
            Testerrors = 0
            Trainerrors = 0

    minValue = test[0].min()
    for i in range(j):
        if (test[0][i] == minValue):
            if (test[1][i] == -1):
                print("Best knn to get the min errors is for p inf ",
                      "and for k ", test[2][i], ". \n the error is: ",
                      test[0][i])
            else:
                print("Best knn to get the min errors is for p ", test[1][i],
                      "and for k ", test[2][i], ". \n the error is: ",
                      test[0][i])
Code example #13
    def flag_selected(self):
        def data(s):
            if hasattr(s, 'index'):
                c = s.data.columns.get_level_values('sensor_code')
                # use of xs here so that column labels are preserved
                x = self.data.xs(c[0], 1, 'sensor_code',
                                 False).loc[s.index].copy()
                self.data.loc[s.index, s.data.columns[0]] = np.nan
                try:
                    s.redraw(self.data)
                except ValueError:
                    pass
                return x.dropna(axis=0, how='any')

        r = pd.concat([data(s) for s in self.selectors], axis=1)
        self.flagged = r.combine_first(self.flagged) if hasattr(
            self, 'flagged') else r
Code example #14
File: icpformat.py Project: reflectometry/WRed
def plot(filename):
    """
    Read and print all command line arguments
    """
    import pylab

    canvas = pylab.gcf().canvas
    d = data(filename)
    if len(d.v.shape) > 2:
        pylab.gca().pcolorfast(d.v[0,:,:])
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.ylabel)
    elif len(d.v.shape) > 1:
        pylab.gca().pcolorfast(d.v)
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.ylabel)
    else:
        pylab.plot(d.x,d.v)
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.vlabel)
    pylab.show()
Code example #15
File: icpformat.py Project: ourobouros/WRed
def plot(filename):
    """
    Read and print all command line arguments
    """
    import pylab

    canvas = pylab.gcf().canvas
    d = data(filename)
    if len(d.v.shape) > 2:
        pylab.gca().pcolorfast(d.v[0,:,:])
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.ylabel)
    elif len(d.v.shape) > 1:
        pylab.gca().pcolorfast(d.v)
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.ylabel)
    else:
        pylab.plot(d.x,d.v)
        pylab.xlabel(d.xlabel)
        pylab.ylabel(d.vlabel)
    pylab.show()
Code example #16
def day_devide(database, month):
    global Days
    name = ''
    time = ''
    so2 = 0
    co = 0
    o3 = 0
    no2 = 0
    pm10 = 0
    pm25 = 0
    cnt = 0
    res = []
    for day in Days:
        for ele in database:
            if ele.date == day and ele.month == month:
                time = ele.year + '-' + ele.month + '-' + ele.date + '-' + '평균'  # '평균' = 'average'
                name = ele.station
                so2 += ele.so2
                co += ele.co
                o3 += ele.o3
                no2 += ele.no2
                pm10 += ele.pm10
                pm25 += ele.pm25
                cnt += 1

        if cnt > 0:
            res.append(
                data(name, time, so2 / cnt, co / cnt, o3 / cnt, no2 / cnt,
                     pm10 / cnt, pm25 / cnt))
            time = ''
            so2 = 0
            co = 0
            o3 = 0
            no2 = 0
            pm10 = 0
            pm25 = 0
            cnt = 0

    return res
Code example #17
def copper_data():
    # Selenium setup
    url = 'https://www.coppercolorado.com/the-mountain/conditions-weather/snow-report'
    driver = webdriver.Chrome(options=options,
                              executable_path="c:\\chromedriver.exe")
    driver.get(url)

    # Grabbing Copper data via Xpath - selecting by id/class is tedious
    temp = driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[1]/primary-header/div/nav/div[1]/\
            feeds-list/ul/li[1]/button/div[1]/widget-weather/div/span').text
    depth_total = driver.find_element_by_xpath(
        '//*[@id="main-content"]/cms-level0/section[2]/cms-level1/div[2]/section[2]/div[5]/\
        cms-level3/section/ui-section/div/div/dor-snow-report/div/div/dor-elm-loader/div/div/dor-grid/\
        div/div/div/dor-grid-item[1]/div/dor-grid/div/div/div/dor-grid-item[5]/div/div/h3'
    ).text
    depth_overnight = driver.find_element_by_xpath(
        '//*[@id="main-content"]/cms-level0/section[2]/cms-level1/div[2]/section[2]/div[5]/ \
        cms-level3/section/ui-section/div/div/dor-snow-report/div/div/dor-elm-loader/div/div/dor-grid/ \
        div/div/div/dor-grid-item[1]/div/dor-grid/div/div/div/dor-grid-item[1]/div/div/h3'
    ).text
    # XPath for trails/lifts not working - going with the class name "progress"
    trails_lifts = driver.find_elements_by_class_name("progress")
    # class name "progress" returns multiple indexes, so 4 = lifts and 3 = trails
    # the index also returns multiple lines, so only grabbing first line using split()
    lifts = trails_lifts[4].text.split('\n')[0]
    trails = trails_lifts[3].text.split('\n')[0]

    driver.quit()

    # int conversion
    temp = int(''.join(filter(str.isdigit, temp)))
    depth_total = int(''.join(filter(str.isdigit, depth_total)))
    depth_overnight = int(''.join(filter(str.isdigit, depth_overnight)))
    lifts = int(''.join(filter(str.isdigit, lifts)))
    trails = int(''.join(filter(str.isdigit, trails)))

    return data(temp, depth_total, depth_overnight, lifts, trails)
Code example #18
File: test.py Project: 99002651/python_mini_project
 def setUp(self):
     self.d1 = data()
     with open('satellite.csv', 'r') as file:
         reader = csv.reader(file)
         for row in reader:
             # unpack the first 15 columns by position
             (name, operator, user, purpose, OrbitClass, OrbitType,
              perigee, apogee, eccentricity, inclination, Mass, year,
              LifeTime, LaunchSite, LaunchVehicle) = row[:15]
             self.d1.add_satellite(OrbitClass, OrbitType, LifeTime, purpose,
                                   name, operator, user)
Code example #19
    def init_stats(self):
        '''
        manually initialises the TauREx output analysis
        when not loaded, the distance functions are still available
        ''' 
        
        self.dir = self.options.dir
        self.params.gen_manual_waverange = False
        self.params.nest_run = False
        self.params.mcmc_run = False
        self.params.downhill_run = True

        #setting up output storage 
        self.stats = {}
        
        #loading data from TauREx NEST output 
        self.load_traces_likelihood('nest_out.db')
        
        #loading parameter list 
#         self.parameters = np.loadtxt(os.path.join(self.dir,'parameters.txt'),dtype='str')
        self.stats['parameters'] = self.NEST_db['fit_params_names']
        
        # initialising data object
        self.dataob = data(self.params)

        # initialising atmosphere object
        self.atmosphereob = atmosphere(self.dataob)

        # set forward model
        if self.params.gen_type == 'transmission':
            self.fmob = transmission(self.atmosphereob)
        elif self.params.gen_type == 'emission':
            self.fmob = emission(self.atmosphereob)
        
        #initialising fitting object 
        self.fitting = fitting(self.fmob)
Code example #20
 def extract_data(base_dir, I0EXP, ul=0, Nchi=513, get_VPLASMA=0):
     if ul==0:
         c = data(base_dir + 'RUNrfa.p', I0EXP = I0EXP, Nchi=Nchi)
         d = data(base_dir + 'RUNrfa.vac', I0EXP = I0EXP, Nchi=Nchi)
         if get_VPLASMA:
             c.get_VPLASMA()
             d.get_VPLASMA()
         return (c,d)
     else:
         a = data(base_dir + 'RUN_rfa_lower.p', I0EXP = I0EXP, Nchi=Nchi)
         b = data(base_dir + 'RUN_rfa_lower.vac', I0EXP = I0EXP, Nchi=Nchi)
         c = data(base_dir + 'RUN_rfa_upper.p', I0EXP = I0EXP, Nchi=Nchi)
         d = data(base_dir + 'RUN_rfa_upper.vac', I0EXP = I0EXP, Nchi=Nchi)
         if get_VPLASMA:
             a.get_VPLASMA()
             b.get_VPLASMA()
             c.get_VPLASMA()
             d.get_VPLASMA()
         return (a,b,c,d)
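A possible call pattern for extract_data above; the base directory and I0EXP value are illustrative assumptions. With ul=0 it returns the (plasma, vacuum) pair, otherwise the lower/upper quadruple:

c, d = extract_data('runs/shot123/', I0EXP=1.0e3)              # ul == 0
a, b, c, d = extract_data('runs/shot123/', I0EXP=1.0e3, ul=1)  # lower/upper runs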
Code example #21
File: gan_model.py Project: TipsyApple/odd_GAN
                stride=2,
                activation_fn=lrelu,
                normalizer_fn=tf.contrib.layers.batch_norm)

            shared = tf.contrib.layers.flatten(shared)

            d = tf.contrib.layers.fully_connected(
                shared,
                1,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(0, 0.02))
            q = tf.contrib.layers.fully_connected(
                shared,
                128,
                activation_fn=lrelu,
                normalizer_fn=tf.contrib.layers.batch_norm)
            q = tf.contrib.layers.fully_connected(
                q, 2, activation_fn=None)  # 2 outputs
            return d, q

    @property
    def vars(self):
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope=self.name)


if __name__ == '__main__':
    data = Data()
    print(data()[0])
    run_net = GanModel(Data())
    run_net.train()
Code example #22
File: script.py Project: mufan-li/nnet
import numpy as np
import numpy.random as rd
import pickle

# my classes
from nnet import *
from data import *
from error import *

# import data from pickle file
# mnist_file = '/Users/billli/Dropbox/Homework/ECE521/A5/mnist.pkl'
mnist_file = 'mnist.pkl'
mnist_data = pickle.load( open( mnist_file, "rb" ) )
tmp = np.zeros((mnist_data['y_test'].shape[0],10))

for k in range(mnist_data['y_test'].shape[0]):
	idx = np.mod(mnist_data['y_test'][k],10)
	tmp[k,idx] = 1

mnist_data['Y_test'] = tmp

train = data(mnist_data['X'][:50000,:], mnist_data['Y'][:50000,:])
valid = data(mnist_data['X'][50000:51000,:], mnist_data['Y'][50000:51000,:])
test = data(mnist_data['X_test'], mnist_data['Y_test'])

## remove large variables
del mnist_data, tmp

# train
execfile('training.py')
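This script is Python 2 (execfile was removed in Python 3); a rough Python 3 equivalent of the last line would be:

# Python 3 replacement for execfile('training.py')
exec(open('training.py').read())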
Code example #23
    convolution23 = convolutionalConnection(poolLayer2, convLayer3,
                                            np.ones([6, 16]), 5, 5, 1, 1)
    pooling34 = poolingConnection(convLayer3, poolLayer4, 2, 2)
    convolution45 = convolutionalConnection(poolLayer4, convLayer5,
                                            np.ones([16, 100]), 5, 5, 1, 1)
    full56 = fullConnection(convLayer5, hiddenLayer6)
    full67 = fullConnection(hiddenLayer6, outputLayer7)

    f = gzip.open("../models/weights-MNIST.pkl")
    (convolution01.k, convolution01.biasWeights, \
     convolution23.k, convolution23.biasWeights, \
     convolution45.k, convolution45.biasWeights, \
     full56.w, full67.w) = cPickle.load(f)
    f.close()

    d = data()
    images, labels = d.loadData("../data/MNISTtest.pkl")
    print "Loaded", len(images), "images of shape", images[0].shape

    confusionMatrix = np.zeros([10, 10])
    total = 0.0
    correct = 0.0
    for i in range(len(images)):

        inputLayer0.set_FM(np.array([images[i]]))

        convolution01.propagate()
        pooling12.propagate()
        convolution23.propagate()
        pooling34.propagate()
        convolution45.propagate()
Code example #24
File: main.py Project: punkungkub/RoobotEvolution
    for people in range(population):
        robot_sim=robot(init_pos=init_pos,nMass=nMass,edge=edge_length)
        mass=robot_sim.cube()
        population_info.update({people:{'sphere':robot_sim.genSphere(mass),'fitness':0}})
    return population_info

material = [[1000, 0, 2*pi, 0],
            [20000, 0, 2*pi, 0],
            [5000, 1e-3, 2*pi, 0],
            [5000, 1e-3, 2*pi, pi]]

# environment = environment(2)
# environment.checkerBoard(10)
population_info=init_sphere()
past_population=population_info
data_management=data()

for decade in range(1,century):
    for gen in range(1,generation):
        print('Generation: {}'.format(gen*decade))
        past_population=population_info
        digiEvol = evolution(population=population_info)
        digiEvol.mutation_size()
        digiEvol.mutation_center()
        population_info=digiEvol.crossover()
        for people in population_info:
            print('Robot: {}'.format(people+1))
            robot_sim=robot(init_pos=init_pos,nMass=nMass,edge=edge_length)
            mass=robot_sim.cube()
            spring=robot_sim.genSpring(spherePos=population_info[people]['sphere'])
            # robot_sim.showBalls(spherePos=population_info[people]['sphere'])
Code example #25
def objective(params):
    """Objective function for Hyperparameter Optimization"""
    # Keep track of evals
    global ITERATION
    ITERATION += 1
    start = timer()
    train_dataloader, valid_dataloader_MATRES, test_dataloader_MATRES, valid_dataloader_HIEVE, test_dataloader_HIEVE, num_classes = data(
        dataset, debugging, params['downsample'], batch_size)
    if finetune:
        model = roberta_mlp(num_classes, dataset, add_loss, params)
    else:
        model = BiLSTM_MLP(num_classes, dataset, add_loss, params)
    model.to(cuda)
    model.zero_grad()
    if len(gpu_num) > 1:
        model = nn.DataParallel(
            model)  # you may try to run the experiments with multiple GPUs
    print("# of parameters:", count_parameters(model))
    model_name = rst_file_name.replace(
        ".rst", "")  # to be designated after finding the best parameters
    total_steps = len(train_dataloader) * epochs
    print("Total steps: [number of batches] x [number of epochs] =",
          total_steps)

    # Total number of training steps is [number of batches] x [number of epochs].
    # (Note that this is not the same as the number of training samples).
    if dataset == "MATRES":
        total_steps = len(train_dataloader) * epochs
        print("Total steps: [number of batches] x [number of epochs] =",
              total_steps)
        matres_exp = exp(cuda, model, epochs, params['learning_rate'],
                         train_dataloader_MATRES, valid_dataloader_MATRES,
                         test_dataloader_MATRES, None, None, finetune, dataset,
                         MATRES_best_PATH, None, None, model_name)
        T_F1, H_F1 = matres_exp.train()
        matres_exp.evaluate(eval_data="MATRES", test=True)
    elif dataset == "HiEve":
        total_steps = len(train_dataloader) * epochs
        print("Total steps: [number of batches] x [number of epochs] =",
              total_steps)
        hieve_exp = exp(cuda, model, epochs, params['learning_rate'],
                        train_dataloader_HIEVE, None, None,
                        valid_dataloader_HIEVE, test_dataloader_HIEVE,
                        finetune, dataset, None, HiEve_best_PATH, None,
                        model_name)
        T_F1, H_F1 = hieve_exp.train()
        hieve_exp.evaluate(eval_data="HiEve", test=True)
    elif dataset == "Joint":
        total_steps = len(train_dataloader) * epochs
        print("Total steps: [number of batches] x [number of epochs] =",
              total_steps)
        joint_exp = exp(cuda, model, epochs, params['learning_rate'],
                        train_dataloader, valid_dataloader_MATRES,
                        test_dataloader_MATRES, valid_dataloader_HIEVE,
                        test_dataloader_HIEVE, finetune, dataset,
                        MATRES_best_PATH, HiEve_best_PATH, None, model_name)
        T_F1, H_F1 = joint_exp.train()
        joint_exp.evaluate(eval_data="HiEve", test=True)
        joint_exp.evaluate(eval_data="MATRES", test=True)
    else:
        raise ValueError("Currently not supporting this dataset! -_-'")

    print(f'Iteration {ITERATION} result: MATRES F1: {T_F1}; HiEve F1: {H_F1}')
    loss = 2 - T_F1 - H_F1

    run_time = format_time(timer() - start)

    # Write to the csv file ('a' means append)
    print(
        "########################## Append a row to out_file ##########################"
    )
    of_connection = open(out_file, 'a')
    writer = csv.writer(of_connection)
    writer.writerow([loss, T_F1, H_F1, params, ITERATION, run_time])

    # Dictionary with information for evaluation
    return {'loss': loss, 'MATRES F1': T_F1, 'HiEve F1': H_F1, \
            'params': params, 'iteration': ITERATION, \
            'train_time': run_time, 'status': STATUS_OK}
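The dictionary returned above is the shape hyperopt expects ('loss' plus 'status': STATUS_OK), so the objective can be driven by fmin. The search space below is a sketch: the parameter names match the two keys the objective reads (params['downsample'], params['learning_rate']), but the ranges and max_evals are illustrative assumptions:

from hyperopt import fmin, tpe, hp, Trials

space = {
    'downsample': hp.uniform('downsample', 0.01, 0.2),         # illustrative range
    'learning_rate': hp.loguniform('learning_rate', -12, -7),  # ~6e-6 .. ~9e-4
}
trials = Trials()
best = fmin(fn=objective, space=space, algo=tpe.suggest,
            max_evals=50, trials=trials)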
Code example #26
def run(params, options=False):

    out_path_orig = params.out_path

    ###############################
    # STAGE 1
    ###############################

    # set output directory of stage 1
    params.out_path = os.path.join(out_path_orig, 'stage_0')

    # initialising data object
    dataob = data(params)

    #initialising TP profile instance
    atmosphereob = atmosphere(dataob)

    #initialising emission radiative transfer code instance
    forwardmodelob = emission(atmosphereob, stage=0)

    #initialising fitting object
    fittingob = fitting(forwardmodelob)

    #fit data for stage 1
    if params.downhill_run:
        fittingob.downhill_fit()  #simplex downhill fit

    if params.mcmc_run and pymc_import:
        fittingob.mcmc_fit()  # MCMC fit
        if MPIimport:
            MPI.COMM_WORLD.Barrier()  # wait for everybody to synchronize here

    if params.nest_run and multinest_import:
        fittingob.multinest_fit()  # Nested sampling fit
        if MPIimport:
            MPI.COMM_WORLD.Barrier()  # wait for everybody to synchronize here

    if MPIimport and MPI.COMM_WORLD.Get_rank() != 0:
        exit()

    outputob = output(fittingob,
                      out_path=os.path.join(out_path_orig, 'stage_0'))

    return outputob

    #     exit()

    # todo fix stage 2

    # generating TP profile covariance from previous fit
    Cov_array = generate_tp_covariance(outputob)

    # saving covariance
    if (MPIimport and MPI.COMM_WORLD.Get_rank() == 0) or not MPIimport:
        np.savetxt(os.path.join(params.out_path, 'tp_covariance.dat'),
                   Cov_array)

    ###############################
    # STAGE 2
    ###############################

    if params.fit_emission_stage2:
        # set output directory of stage 2
        params.out_path = os.path.join(out_path_orig, 'stage_1')

        #setting up objects for stage 2 fitting
        dataob1 = data(params)

        #setting stage 2 atmosphere object
        atmosphereob1 = atmosphere(dataob1,
                                   tp_profile_type='hybrid',
                                   covariance=Cov_array)

        #setting stage 2 forward model
        forwardmodelob1 = emission(atmosphereob1)

        #setting stage 2 fitting object
        fittingob1 = fitting(forwardmodelob1)

        # #running stage 2 fit
        if params.downhill_run:
            fittingob1.downhill_fit()  #simplex downhill fit

        if params.mcmc_run and pymc_import:
            fittingob1.mcmc_fit()  # MCMC fit
            MPI.COMM_WORLD.Barrier()  # wait for everybody to synchronize here

        if params.nest_run and multinest_import:
            fittingob1.multinest_fit()  # Nested sampling fit
            MPI.COMM_WORLD.Barrier()  # wait for everybody to synchronize here

    ###############
    #finished fitting. Post fitting analysis from here

    #forcing slave processes to exit at this stage
    if MPIimport and MPI.COMM_WORLD.Get_rank() != 0:
        exit()

    #initiating output instance with fitted data from fitting class
    if params.fit_emission_stage2:
        outputob1 = output(fittingob1,
                           out_path=os.path.join(out_path_orig, 'stage_1'))
Code example #27
        bidir_conv_lstm.output.reads(bidir_common)
    return bidir_conv_lstm


CONV_data = td.Record((td.Map(
    td.Vector(vsize) >> td.Function(lambda x: tf.reshape(x, [-1, vsize, 1]))),
                       td.Map(td.Scalar())))
CONV_model = (CONV_data >> bidirectional_dynamic_CONV(
    multi_convLSTM_cell([vsize, vsize, vsize], [100, 100, 100]),
    multi_convLSTM_cell([vsize, vsize, vsize], [100, 100, 100])) >> td.Void())

FC_data = td.Record((td.Map(td.Vector(vsize)), td.Map(td.Scalar())))
FC_model = (FC_data >> bidirectional_dynamic_FC(multi_FC_cell(
    [1000] * 5), multi_FC_cell([1000] * 5), 1000) >> td.Void())

store = data(FLAGS.data_dir + FLAGS.data_type, FLAGS.truncate)

if FLAGS.model == "lstm":
    model = FC_model
elif FLAGS.model == "convlstm":
    model = CONV_model
else:
    raise NotImplementedError

compiler = td.Compiler.create(model)
logits = tf.squeeze(compiler.metric_tensors['logits'])
labels = compiler.metric_tensors['labels']
predictions = tf.nn.sigmoid(logits)

l1_loss = tf.reduce_mean(tf.abs(tf.subtract(labels, predictions)))
l2_loss = tf.reduce_mean(tf.square(tf.subtract(labels, predictions)))
Code example #28
File: ensemble.py Project: piupiuup/competition
def main():
    # find users whose first action on cate 8 happened within 4 days
    get_actions = data()
    user_data = get_actions.actions[get_actions.actions.cate == 8]
    user = user_data.groupby('user_id').count().index
    result = np.zeros((len(user), 2))
    result_first = ((pd.to_datetime('2016-04-16') -  pd.to_datetime(user_data.groupby('user_id').first()['time']))\
                     / np.timedelta64(1,'D')).values
    user = user_data.groupby('user_id').count().index
    user = pd.DataFrame(user)
    user['first'] = result_first
    user['first'] = (user['first'] <= 4).astype(np.int)
    # --------------------------------
    files = ['../u1_u5/probability/u_result_1.csv', '../u1_u5/probability/u_result_2.csv', '../u1_u5/probability/u_result_3.csv', \
             '../u1_u5/probability/u_result_4.csv', '../u1_u5/probability/u_result_5.csv']
    f_user = {}
    for rounds in range(len(files)):
        # (1) load the two ui models; select users whose first cate-8 action was within 4 days for prediction
        ui1 = pd.read_csv('../ui_model_1.csv')
        ui2 = pd.read_csv('../ui_model_2.csv')
        ui = ui1.copy()
        ui['label'] = (ui1['label'] + ui2['label']) * 0.5
        ui = pd.merge(ui, user, how='left', on='user_id')
        ui_select = ui[ui.label > 0.3]
        ui_select = ui_select[ui_select['first'] == 1]
        user_bought = get_actions.actions[(get_actions.actions.type == 4)
                                          & (get_actions.actions.cate == 8)]
        user_bought = user_bought[user_bought.columns[[0, 4]]]
        user_bought = user_bought.groupby('user_id', as_index=False).first()
        user_select = pd.merge(ui_select,
                               user_bought,
                               how='left',
                               on=['user_id'])
        user_select = user_select[user_select.type == 4]
        user_select = user_select[user_select.columns[[0, 1, 4]]]
        ui_select = pd.merge(ui_select,
                             user_select,
                             how='left',
                             on=['user_id', 'sku_id'])
        ui_select = ui_select[ui_select.type != 4]
        ui_max_sku = ui_select.groupby('user_id', as_index=False).max()
        ui_max_sku = ui_max_sku[ui_max_sku.columns[[0, 1, 3]]]
        ui_max_sku = pd.merge(ui_select,
                              ui_max_sku,
                              how='left',
                              on=['user_id', 'sku_id'])
        ui_select_sku = ui_max_sku[ui_max_sku['first_y'] == 1]
        # (2) 加载u模型,选出前800个高预测中第一次对cate8的行为在4天内的用户
        user_model = pd.read_csv(files[int(rounds)])
        user_select = user_model[:800]
        user_select = pd.merge(user_select, user, how='left', on='user_id')
        user_select = user_select[user_select['first'] == 1]
        # (3) part 1: take the intersection of the two user sets
        first_part_user = np.intersect1d(ui_select_sku.values[:, 0],
                                         user_select.values[:, 1])
        ui_max = ui.groupby('user_id', as_index=False).max()
        ui_max = pd.merge(ui, ui_max, how='left', on=['user_id', 'label'])
        ui_max = ui_max.dropna()
        first_part_user_sku = ui_max[ui_max.user_id.isin(first_part_user)]
        first_part_user_sku = first_part_user_sku.values[:, :2].astype(np.int)
        print(first_part_user_sku.shape[0])
        # (4) part 2: top predictions (>0.9) of the averaged ui models
        ui_highest = ui[ui.label >= 0.9]
        ui_highest = np.setdiff1d(ui_highest.values[:, 0],
                                  ui_select_sku.values[:, 0])
        ui_highest = ui[ui.user_id.isin(ui_highest)]
        ui_highest_max = ui_highest.groupby('user_id', as_index=False).max()
        ui_highest_max = ui_highest_max[ui_highest_max.columns[[0, 2, 3]]]
        ui_highest_max = pd.merge(ui_highest,
                                  ui_highest_max,
                                  how='left',
                                  on=['user_id', 'label'])
        ui_highest_max = ui_highest_max[~np.isnan(ui_highest_max['first_y'])]
        second_part_user_sku = ui_highest_max.values[:, :2].astype(np.int)
        print(second_part_user_sku.shape[0])
        # (5) part 3: top 150 u-model users whose first action was not within 4 days
        user_select = user_model[:800]
        user_select = pd.merge(user_select, user, how='left', on='user_id')
        user_select = user_select[user_select['first'] != 1]
        third_part_user = user_select[:150].values[:, 1]
        third_part_user_sku = ui_max[ui_max.user_id.isin(third_part_user)]
        third_part_user_sku = third_part_user_sku.values[:, :2].astype(np.int)
        print(third_part_user_sku.shape[0])
        # (6) part 4: top 100 within-4-day ui-model users not already in part 1
        ui_max_select = ui_select_sku[~ui_select_sku.user_id.
                                      isin(first_part_user)]
        ui_max_select.sort_values(by='label', ascending=False, inplace=True)
        fourth_part_user = ui_max_select[:100].values[:, 0]
        fourth_part_user_sku = ui_select_sku[ui_select_sku.user_id.isin(
            fourth_part_user)]
        fourth_part_user_sku = fourth_part_user_sku.values[:100, :2].astype(
            np.int)
        print(fourth_part_user_sku.shape[0])
        # (7) combine all four parts
        final_user = np.union1d(first_part_user, second_part_user_sku[:, 0])
        final_user = np.union1d(final_user, third_part_user)
        final_user = np.union1d(final_user, fourth_part_user)
        final_user_sku = ui_max[ui_max.user_id.isin(final_user)]
        final_user_sku = final_user_sku.values[:, :2].astype(np.int)
        print(final_user_sku.shape[0])
        f_user[str(int(rounds))] = pd.DataFrame({'user_id': final_user})
        f_user[str(int(rounds))][str(int(rounds))] = 1
    # -------------------
    # voting
    final_user = f_user['0']
    for rounds in range(1, len(files)):
        final_user = pd.merge(final_user,
                              f_user[str(rounds)],
                              how='outer',
                              on='user_id')
    final_user = final_user.fillna(0)
    final_user['sum'] = 0
    for rounds in range(len(files)):
        final_user['sum'] += final_user[str(rounds)]
    final_user = final_user[final_user['sum'] >= 2]
    final_user = final_user.values[:, 0]
    final_user_sku = ui_max[ui_max.user_id.isin(final_user)]
    final_user_sku = final_user_sku.values[:, :2].astype(np.int)
    print(final_user_sku.shape[0])
    submit = pd.DataFrame({
        'user_id': final_user_sku[:, 0].astype(np.int),
        'sku_id': final_user_sku[:, 1].astype(np.int)
    })
    submit = submit[['user_id', 'sku_id']]
    submit.to_csv(args.output, index=False)
Code example #29
"""
This is a file that checks everything 
data(data.py): Gives access to global data variable
re(regex): Gives access to regex for valid_email

"""
from error import InputError, AccessError
import data
import re
import hashlib
import jwt
import requests
import re

def check_valid_token(token):
    """
    Determine whether supplied token is valid

    Parameters:
        token(string): An authorisation hash

    Returns:
        Raises an error if token is invalid
        Returns u_id if token is valid
    """
    try:
        # If parent function was called using http, token is in ASCII.
        # If parent function was called via command line, token is a byte string.
        # I don't understand why.
Code example #30
def monthly_search(service_key, station_name, databaseformonth):
    url = "http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getMsrstnAcctoRltmMesureDnsty?serviceKey=" + service_key + "&numOfRows=2136&pageSize=2136&pageNo=1&startPage=1&stationName=" + urllib.parse.quote(
        station_name) + "&dataTerm=3MONTH&ver=1.3"
    req = urllib.request.Request(url)
    resp = urllib.request.urlopen(req)
    rescode = resp.getcode()

    # item:  SO2   CO    O3    NO2   PM10    PM2.5
    # unit:  ppm   ppm   ppm   ppm   ㎍/㎥    ㎍/㎥
    # PM10: fine dust, PM25: ultra-fine dust, O3: ozone
    if rescode == 200:
        resp_body = resp.read()
        doc = parseString(resp_body.decode('utf-8'))
        ele = doc.getElementsByTagName('item')
        time = ''
        so2 = ''
        co = ''
        no2 = ''
        o3 = ''
        pm10 = ''
        pm25 = ''
        for item in ele:
            for info in item.childNodes:
                if info.nodeName == 'dataTime':
                    time = str(info.firstChild.data)
                if info.nodeName == 'so2Value':
                    if info.firstChild.data == '-':
                        so2 = 0
                    else:
                        so2 = str(info.firstChild.data)
                if info.nodeName == 'coValue':
                    if info.firstChild.data == '-':
                        co = 0
                    else:
                        co = str(info.firstChild.data)
                if info.nodeName == 'no2Value':
                    if info.firstChild.data == '-':
                        no2 = 0
                    else:
                        no2 = str(info.firstChild.data)
                if info.nodeName == 'o3Value':
                    if info.firstChild.data == '-':
                        o3 = 0
                    else:
                        o3 = str(info.firstChild.data)
                if info.nodeName == 'pm10Value':
                    if info.firstChild.data == '-':
                        pm10 = 0
                    else:
                        pm10 = str(info.firstChild.data)
                if info.nodeName == 'pm25Value':
                    if info.firstChild.data == '-':
                        pm25 = 0
                    else:
                        pm25 = str(info.firstChild.data)
                    newdata = data(station_name, time, so2, co, o3, no2, pm10,
                                   pm25)
                    databaseformonth.append(newdata)

    else:
        print("에러 코드 : " + str(rescode))
Code example #31
def main():
    # Get running configuration
    config, _ = get_config()
    print_config()


    # Build tensorflow graph from config
    print("Building graph...")
    actor = Actor(config)

    # Creating dataset
    if not config.inference_mode:
        l = []
        for i in range(config.nCells):
            for j in range(config.nMuts):
                l.append([i,j])
        l = np.asarray(l)

    
    # Saver to save & restore all the variables.
    variables_to_save = [v for v in tf.global_variables() if 'Adam' not in v.name]
    saver = tf.train.Saver(var_list=variables_to_save, keep_checkpoint_every_n_hours=1.0, max_to_keep= 1000)  

    print("Starting session...")
    with tf.Session() as sess:
        # Run initialize op
        sess.run(tf.global_variables_initializer())

        # Training mode
        if not config.inference_mode:

            dataset = data(config.nb_epoch*config.batch_size, config.nCells, config.nMuts, config.ms_dir, config.alpha, config.beta)
            print('Dataset was created!')
            matrices_p, matrices_n = dataset

            print("Starting training...")
            for i in tqdm(range(config.nb_epoch)): 
             
                feed = {actor.input_: train_batch(config, np.asarray(matrices_n), l, i)}

                # Forward pass & train step
                summary, train_step1, train_step2 = sess.run([actor.merged, actor.train_step1, actor.train_step2], feed_dict=feed)

            print("Training COMPLETED !")
            saver.save(sess, config.save_to + "/actor.ckpt")
        # Inference mode
        else:

            dataset = data(config.nTestMats, config.nCells, config.nMuts, config.ms_dir, config.alpha, config.beta)
            print('Dataset was created!')
            matrices_p, matrices_n = dataset
            matrices_n_t = np.asarray(matrices_n)
            matrices_p_t = np.asarray(matrices_p)
            nMats = np.shape(matrices_n_t)[0]

            saver.restore(sess, config.restore_from + "/actor.ckpt")
            print("Model restored.")
            
            V_o = np.zeros((nMats, 1), dtype = np.float64)
            f_1_to_0_o = np.zeros((nMats, 1), dtype = np.float64)
            f_0_to_1_o = np.zeros((nMats, 1), dtype = np.float64)
            N00_o = np.zeros((nMats, 1), dtype = np.float64)
            N11_o = np.zeros((nMats, 1), dtype = np.float64)            
            N00_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            N11_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            N10_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            N01_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            
            
            fp_fn = np.zeros((nMats, config.nCells, config.nMuts), dtype = np.float32)
            for k in range(np.shape(matrices_n_t)[0]):
                
                fp_fn[k, matrices_n_t[k,:,:] == 1] = config.alpha
                fp_fn[k, matrices_n_t[k,:,:] == 0] = config.beta
                
                
                N01_o_ = np.sum(matrices_n_t[k,:,:] - matrices_p_t[k,:,:] == -1) 
                N10_o_ = np.sum(matrices_p_t[k,:,:] - matrices_n_t[k,:,:] == -1)
                N11_o_ = np.sum(matrices_p_t[k,:,:] + matrices_n_t[k,:,:] == 2)
                N00_o_ = np.sum(matrices_p_t[k,:,:] - matrices_n_t[k,:,:] == 0) - N11_o_
                
                f_1_to_0_o[k, 0] = N10_o_
                f_0_to_1_o[k, 0] = N01_o_
                # fp_o = config.alpha
                # fn_o = config.beta
                

                N00_o[k, 0] = N00_o_
                N11_o[k, 0] = N11_o_
                N00_NLL_o[k, 0] = N00_o_*np.log(1/(1-config.beta))
                N11_NLL_o[k, 0] = N11_o_*np.log(1/(1-config.alpha))
                N01_NLL_o[k, 0] = N01_o_*np.log(1/config.beta)
                N10_NLL_o[k, 0] = N10_o_*np.log(1/config.alpha)
                NLL_o[k, 0] = np.sum([N00_NLL_o[k, 0], N11_NLL_o[k, 0], N01_NLL_o[k, 0], N10_NLL_o[k, 0]])
                
                     
            l = []
            for i in range(config.nCells):
                for j in range(config.nMuts):
                    l.append([i,j])
            l = np.asarray(l)
            max_length = config.nCells * config.nMuts
            a = np.expand_dims(matrices_n_t.reshape(-1, actor.max_length),2)
            b = np.expand_dims(fp_fn.reshape(-1, actor.max_length),2)
            x = np.tile(l,(nMats,1,1))
            c = np.squeeze(np.concatenate([x,b,a], axis = 2))
            d = np.asarray([np.take(c[i,:,:],np.random.permutation(c[i,:,:].shape[0]),axis=0,out=c[i,:,:]) for i in range(np.shape(c)[0])])
            
            output_ = np.zeros((nMats, 14), dtype = np.float64)
            for j in tqdm(range(nMats)): # num of examples
                start_t = time()

                input_batch = np.tile(d[j,:,:],(actor.batch_size,1,1))
                
                feed = {actor.input_: input_batch}

                
                pos  = sess.run([actor.positions] , feed_dict=feed)[0]


                inp_ = tf.convert_to_tensor(input_batch, dtype=tf.float32)
                pos =  tf.convert_to_tensor(pos, dtype=tf.int32)

                
                r = tf.range(start = 0, limit = actor.batch_size, delta = 1)
                r = tf.expand_dims(r ,1)
                r = tf.expand_dims(r ,2)
                r3 = tf.cast(tf.ones([actor.max_length , 1]) * tf.cast(r, tf.float32), tf.int32)
                r4 = tf.squeeze(r, axis = 2)
                i = 0
                while i < int(max_length/10):    
                    r5 = tf.expand_dims(tf.fill([actor.batch_size], i), axis = 1)
                    u = tf.ones_like(r5)
                    r4_r5 = tf.concat([r4, r5], axis = 1)

                    pos_mask = tf.squeeze(tf.scatter_nd(indices = r4_r5, updates = u, shape = [actor.batch_size, actor.max_length, 1]), axis = 2)

                    pos_mask_cum1 = tf.cumsum(pos_mask, reverse = True, exclusive = True, axis = 1)
                    pos_mask_cum2 = tf.cumsum(pos_mask, reverse = False, exclusive = False, axis = 1) # for calculating NLL

                    per_pos = tf.concat([r3, tf.expand_dims(pos, axis = 2)], axis = 2)

                    per_ = tf.gather_nd(inp_, indices = per_pos)
            
                    per_matrix = per_[:,:,3:4]

                    # flipping the input
                    m1 = tf.multiply(tf.squeeze(per_matrix, axis = 2), tf.cast(pos_mask_cum1, tf.float32))
                    m1 = tf.subtract(tf.cast(pos_mask_cum1, tf.float32) , m1)
                    m2 = tf.multiply(tf.squeeze(per_matrix, axis = 2), tf.cast(pos_mask_cum2, tf.float32))
                    T_f = tf.add(m1, m2)

                    per_flipped = tf.concat([per_[:,:,0:3], tf.expand_dims(T_f, axis = 2)], axis = 2)
                    idx = tf.concat([r3, tf.cast(per_flipped[:,:,0:2], tf.int32)], axis = 2)
                    m_f = tf.scatter_nd(indices = tf.expand_dims(idx,2), updates = per_flipped[:,:,3:4], shape = tf.constant([actor.batch_size, actor.config.nCells, actor.config.nMuts]))           
                    c_v = actor.count3gametes(m_f) # cost for flipped matrix
                    V_rl = c_v.eval()
                    g = np.min(V_rl)
                    
                    # Calculating NLL
                    per_fp_fn = per_[:,:,2:3]
                    per_fp_fn_log = tf.log(1/per_fp_fn) # for N01 and N10
                    per_fp_fn_com = tf.subtract(tf.ones_like(per_fp_fn), per_fp_fn) # for N00 and N11
                    per_fp_fn_com_log = tf.log(1/per_fp_fn_com)

                    NLL_N10_N01 = tf.reduce_sum(tf.multiply(tf.squeeze(per_fp_fn_log, axis = 2), tf.cast(pos_mask_cum1, tf.float32)), axis = 1, keepdims = True)

                    per_matrix_mul_cum2 = tf.multiply(tf.squeeze(per_[:,:,3:4], axis = 2), tf.cast(pos_mask_cum2, tf.float32))
                    N11 = tf.reduce_sum(per_matrix_mul_cum2, axis = 1, keepdims = True)
                    N11_rl = tf.squeeze(N11, axis = 1).eval()
                    sum_mask_cum2 = tf.reduce_sum(tf.cast(pos_mask_cum2, tf.float32), axis = 1, keepdims = True )
                    N00 = tf.subtract(sum_mask_cum2, N11)
                    N00_rl = tf.squeeze(N00, axis = 1).eval()

                    sum_per_matrix = tf.reduce_sum(tf.squeeze(per_matrix, axis = 2) , axis = 1)
                    sum_per_fp =  tf.reduce_sum(tf.squeeze(tf.multiply(per_fp_fn, per_matrix) , axis = 2) , axis = 1)
                    fp = tf.divide(sum_per_fp, sum_per_matrix)
                    fp_r = fp.eval()

                    sum_per_fn = tf.subtract(tf.reduce_sum(tf.squeeze(per_fp_fn, axis = 2), axis = 1), sum_per_fp)
                    q = tf.cast(tf.tile(tf.constant([actor.max_length]), tf.constant([actor.batch_size])), tf.float32)
                    fn = tf.divide(sum_per_fn, tf.subtract(q, sum_per_matrix) )
                    fn_r = fn.eval()

                    fp_com = tf.log(1/tf.subtract(tf.cast(tf.tile(tf.constant([1]), tf.constant([actor.batch_size])), tf.float32), fp))
                    fn_com = tf.log(1/tf.subtract(tf.cast(tf.tile(tf.constant([1]), tf.constant([actor.batch_size])), tf.float32), fn))

                    N00_NLL = tf.multiply(tf.expand_dims(fp_com, axis = 1), N00)
                    N11_NLL = tf.multiply(tf.expand_dims(fn_com, axis = 1), N11)

                    NLL = tf.scalar_mul(config.gamma, tf.add_n([NLL_N10_N01, N00_NLL, N11_NLL ]))            
                    NLL_rl = tf.squeeze(NLL, axis =1).eval()
                    
                    g_w = np.where(V_rl == g)[0]
                    g_w_nll = np.argmin(NLL_rl[g_w])
                    gg = g_w[g_w_nll]    

                    
                    if g == 0:
                        c_v_rl = V_rl[gg]
                        m_rl = m_f.eval()[gg]                    
                        N10 = tf.reduce_sum(tf.multiply(tf.squeeze(per_matrix, axis = 2), tf.cast(pos_mask_cum1, tf.float32)), axis = 1, keepdims = True)
                        f_1_to_0_rl = tf.squeeze(N10, axis = 1)[gg].eval()
                        sum_mask_cum1 = tf.reduce_sum(tf.cast(pos_mask_cum1, tf.float32), axis = 1, keepdims = True )
                        N01 = tf.subtract(sum_mask_cum1, N10)
                        f_0_to_1_rl = tf.squeeze(N01, axis = 1)[gg].eval()
                        n_f = copy.deepcopy(i)
                        
                        # cost of original
                        idx = tf.concat([r3, tf.cast(inp_[:,:,0:2], tf.int32)], axis = 2)
                        m = tf.scatter_nd(indices = tf.expand_dims(idx,2), updates = inp_[:,:,3:4], shape = tf.constant([actor.batch_size, actor.config.nCells, actor.config.nMuts]))
                        c_v_o = actor.count3gametes(m)
                        c_n = c_v_o[0].eval()
                        fp_v = fp_r[gg]
                        fn_v = fn_r[gg]
                        c2 = copy.deepcopy(NLL_rl[gg])
                        
                        df = pd.DataFrame(m_rl.astype(int) , index = ['cell' + str(k1) for k1 in range(np.shape(m_rl)[0])], \
                                          columns = ['mut' + str(h1) for h1 in range(np.shape(m_rl)[1])])
                        df.index.rename('cellID/mutID', inplace=True)
                        df.to_csv(config.output_dir + '/mrl_{}.txt'.format(j + 1), sep='\t')
                        break
                        
                    c_t = tf.add(tf.squeeze(NLL, axis = 1), tf.cast(c_v, tf.float32))
                    
                    if i == 0:
                        c2 = copy.deepcopy(NLL_rl[gg])
                        c_v_rl = V_rl[gg]
                        n_f = copy.deepcopy(i)
                        f_0_to_1_rl = 0
                        f_1_to_0_rl = 0
                        m_rl = m_f.eval()[gg]
                        fp_v = fp_r[gg]
                        fn_v = fn_r[gg]
                        g1 = copy.deepcopy(g)
                        
                    if g1 > g:  # this step improves on the best violation count seen so far
                        c2 = copy.deepcopy(NLL_rl[gg])
                        c_v_rl = V_rl[gg]
                        n_f = copy.deepcopy(i)
                        f_0_to_1_rl = tf.squeeze(N01, axis = 1)[gg].eval()
                        f_1_to_0_rl = tf.squeeze(N10, axis = 1)[gg].eval()
                        m_rl = m_f.eval()[gg] 
                        fp_v = fp_r[gg]
                        fn_v = fn_r[gg]
                        g1 = copy.deepcopy(g)
                     
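                    # On the final flipping step, also record the violation count of the
                    # original noisy matrix and write out the best reconstruction found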
                    if i == int(max_length/10) - 1: 
                        # cost of original
                        idx = tf.concat([r3, tf.cast(inp_[:,:,0:2], tf.int32)], axis = 2)
                        m = tf.scatter_nd(indices = tf.expand_dims(idx,2), updates = inp_[:,:,3:4], shape = tf.constant([actor.batch_size, actor.config.nCells, actor.config.nMuts]))
                        c_v_o = actor.count3gametes(m)
                        c_n = c_v_o[0].eval()
                        df = pd.DataFrame(m_rl.astype(int) , index = ['cell' + str(k1) for k1 in range(np.shape(m_rl)[0])], \
                                          columns = ['mut' + str(h1) for h1 in range(np.shape(m_rl)[1])])
                        df.index.rename('cellID/mutID', inplace=True)
                        df.to_csv(config.output_dir + '/mrl_{}.txt'.format(j + 1), sep='\t') 
                    i += 1  
                dur_t = time() - start_t

                output_[j,0] = fp_v
                output_[j,1] = fn_v 
                output_[j,2] = c2  # cost (NLL part)
                output_[j,3] = c_v_rl  # cost (violation part)
                output_[j,4] = c_n # number of violations for the noisy matrix
                output_[j,5] = n_f # total number of flips based on rl
                output_[j,6] = f_0_to_1_rl
                output_[j,7] = f_1_to_0_rl
                output_[j,8] = dur_t
                # output_[j,9] = s_m[j]
                    
                    
                    
            output_[:,9] = np.squeeze(N00_o)
            output_[:,10] = np.squeeze(N11_o)
            output_[:,11] = np.squeeze(NLL_o)
            output_[:,12] = np.squeeze(f_1_to_0_o)
            output_[:,13] = np.squeeze(f_0_to_1_o)
            
            df = pd.DataFrame(output_, index = ["test" + str(k) for k in range(nMats)], \
                             columns = ["fp", "fn","NLL_rl", "V_rl", "V_o", "n_f", "f_0_to_1_rl", "f_1_to_0_rl",\
                                        "time", "N00_o", "N11_o", "NLL_o", "f_1_to_0_o", "f_0_to_1_o"])
            df.to_csv(config.output_dir + '/test_{nCells}x{nMuts}.csv'.format(nCells = config.nCells, nMuts = config.nMuts), sep = ',')
Code Example #33
    ########################### configs ###########################
    # config, _ = get_config()
    K.set_learning_phase(1)
    K.tensorflow_backend._get_available_gpus()  # probe available GPUs (private Keras API; return value unused)
    K.clear_session()

    ########################### Training mode ###########################
    if not config.inference_mode:

        model_critic, model_actor = compile_models()
        print_config()

        ########################### Dataset ###########################

        dataset = data(config.nb_epoch * config.batch_size, config.nCells,
                       config.nMuts, config.ms_dir, config.alpha, config.beta)
        print('Dataset was created!')
        matrices_p, matrices_n = dataset
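        # enumerate every (cell, mutation) coordinate pair of the matrix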
        l = []
        for i in range(config.nCells):
            for j in range(config.nMuts):
                l.append([i, j])
        l = np.asarray(l)

        f_input = np.random.randn(config.batch_size,
                                  config.input_dimension * config.hidden_dim)
        target_actor = [np.zeros((config.batch_size))]
        target_critic = [np.zeros((config.batch_size))]
        act_loss = []
        crc_loss = []
Code Example #34
File: document-embed.py Project: Skagevang/Webint
				sum_err += err.data.item()

				self.module_optimizer.step()
				num += 1

			print("Training err " + str(sum_err / num))
			# self.save()
			if self.log != "":
				with open(self.log, "a") as log:
					log.write("Training err " + str(sum_err / num) + "\n")
			self.test()
			if self.iter==self.max_iter:
				break



if __name__ == '__main__':
	from data import *
	from content import *
	dimension = 1000
	batch_size = 128
	max_iter = 30
	lr = 0.01

	path = "active1000"
	dataset = data(path)
	c = content(dataset.data, dataset.question, dataset.click_matrix, dataset.location, dataset.counter_location)
	model = embedding(dimension, batch_size, max_iter, c, lr)
	print("\nStart Training")
	model.train()
    
Code Example #35
File: Lab1.py Project: igor17400/TrabalhoOAC
def startAnalysis():
    arr_performance_obj = []

    # str_path = input('Enter the path to the .asm file: ')
    # str_path = "./simpleExpression/simple_expression.asm"
    str_path = input('Enter the .asm file (provide the path): ')
    data(str_path)

    labels_dict, total_execution_lines = getLabelsDict(str_path)
    total_text_lines = getTotalTextLine(str_path)
    arr_bin_machine = []
    line_pos = 0
    fake_line = 0

    #print('--------------')
    #print(labels_dict)
    #print('--------------')
    for i in range(total_text_lines):

        line = progamCounter(str_path, i)
        fake_line += 1

        if line.isspace():
            continue

        # simplify the instruction by stripping the pieces our logic does not use
        line_regex = re.sub("[$,() ]"," ", line)
        line_regex = re.sub("   "," ",line_regex) #debug
        line_regex = re.sub("  "," ",line_regex) #debug
        instruction = line_regex.split()

        if instruction[0][-1] == ':' or instruction[0][0] == '.':
            continue

        if instruction[0] == 'lw' or instruction[0] == 'sw':
            ## statement to collect performance data
            performance = Performance(instruction[0], 'type I', datetime.datetime.now())

            opcode = TypeI.getOpcode(instruction[0])
            rs, rt = TypeI.getIRegisters(instruction)
            address = TypeI.getAddress(instruction[2])

            ## statement to collect performance data
            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] in ('add','sub','and','or','nor','xor', 'slt'):
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rs, rt, rd = TypeR.getRRegisters(instruction)
            funct = TypeR.getFunct(instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '000000', rs, rt, rd, '00000', funct, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'sll' or instruction[0] == 'srl' or instruction[0] == 'sra':
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rs, rt, rd = TypeR.getRRegisters(instruction)
            funct = TypeR.getFunct(instruction[0])
            shamt = '{0:05b}'.format(int(instruction[3]))

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '000000', rs, rt, rd, shamt, funct, fake_line)
            arr_bin_machine.append(bm)
        
        elif instruction[0] == 'mult' or instruction[0] == 'div':
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rs, rt, rd = TypeR.getRRegisters(instruction)
            funct = TypeR.getFunct(instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '000000', rs, rt, rd, '00000', funct, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'mfhi' or instruction[0] == 'mflo':
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rs = '00000'
            rt = '00000'
            rd = Registers.getReg(instruction[1][0],instruction[1][1])
            funct = TypeR.getFunct(instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '000000', rs, rt, rd, '00000', funct, fake_line)
            arr_bin_machine.append(bm)
        
        elif instruction[0] == 'srav':
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rt, rs, rd = TypeR.getRRegisters(instruction)
            funct = TypeR.getFunct(instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '000000', rs, rt, rd, '00000', funct, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'madd' or instruction[0] == 'msubu':
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rs, rt, rd = TypeR.getRRegisters(instruction)
            funct = TypeR.getFunct(instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '011100', rs, rt, rd, '00000', funct, fake_line)
            arr_bin_machine.append(bm)            

        elif instruction[0] == 'beq' or instruction[0] == 'bne' or\
                    instruction[0] == 'bgez' or instruction[0] == 'bgezal':
            
            performance = Performance(instruction[0], 'type I', datetime.datetime.now())

            opcode = TypeI.getOpcode(instruction[0])
            rs, rt = TypeI.getIRegisters(instruction)
            address = TypeI.getAddress(instruction, line_pos, labels_dict)

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'j' or instruction[0] == 'jal':
            performance = Performance(instruction[0], 'type J', datetime.datetime.now())

            opcode = TypeJ.getOpcode(instruction[0])
            address = TypeJ.getAddress(instruction, line_pos, labels_dict)

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineJ(line, str(line_pos), opcode, address, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'jr':
            performance = Performance(instruction[0], 'type J', datetime.datetime.now())

            opcode = TypeJ.getOpcode(instruction[0])
            address = TypeJ.getAddress(instruction, line_pos, labels_dict, instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineJ(line, str(line_pos), opcode, address, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'jalr':
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rs, rt, rd = TypeR.getRRegisters(instruction)
            funct = TypeR.getFunct(instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '000000', rs, rt, rd, '00000', funct, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'lui':
            performance = Performance(instruction[0], 'type I', datetime.datetime.now())

            opcode = TypeI.getOpcode(instruction[0])
            rs, rt = TypeI.getIRegisters(instruction)
            address = TypeI.getAddress(instruction)

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
            arr_bin_machine.append(bm)

        elif instruction[0] == 'li':

            performance = Performance(instruction[0], 'type I', datetime.datetime.now())

            num = int(instruction[2], 0)
            if num >= 65536:  # value does not fit in a 16-bit immediate
                # PSEUDO-INSTRUCTION
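                # e.g. (illustrative): li $t0, 0x12345678 expands to
                #   lui $1, 0x1234        (upper 16 bits into $1)
                #   ori $t0, $1, 0x5678   (OR in the lower 16 bits)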
                num_lui = '0x{:04x}'.format((num >> 16) & 0xFFFF)  # upper 16 bits go to lui
                first_inst = ['lui', '1', num_lui]
                opcode = TypeI.getOpcode(first_inst[0])
                rs, rt = TypeI.getIRegisters(first_inst)
                address = TypeI.getAddress(first_inst)
                

                # Create the object and save it in a list for later access
                bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
                arr_bin_machine.append(bm)

                #### advance the line counter for the pseudo-instruction
                line_pos += 1 

                num_lui = '0x{:04x}'.format(num & 0xFFFF)  # lower 16 bits go to ori
                second_inst = ['ori', instruction[1], '1', num_lui]
                opcode = TypeI.getOpcode(second_inst[0])
                rs, rt = TypeI.getIRegisters(second_inst)
                address = TypeI.getAddress(second_inst)

                performance.setTime2(datetime.datetime.now())
                arr_performance_obj.append(performance)
                # Create the object and save it in a list for later access
                bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
                bm.setIsPseudo(True)
                arr_bin_machine.append(bm)

            else:
                analogo_inst = ['addiu', instruction[1], '0', instruction[2]]
                opcode = TypeI.getOpcode(analogo_inst[0])
                rs, rt = TypeI.getIRegisters(analogo_inst)
                address = TypeI.getAddress(analogo_inst)

                performance.setTime2(datetime.datetime.now())
                arr_performance_obj.append(performance)
                # Create the object and save it in a list for later access
                bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
                arr_bin_machine.append(bm)


            

        elif instruction[0] == 'addi' or instruction[0] == 'andi' or\
                instruction[0] == 'ori' or instruction[0] == 'xori':
            
            performance = Performance(instruction[0], 'type I', datetime.datetime.now())

            imidiate_int = int(instruction[3], 0)
            if imidiate_int < 0 and (instruction[0] == 'andi' or\
                        instruction[0] == 'ori' or instruction[0] == 'xori'):
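                # A negative immediate cannot go in the zero-extended 16-bit field:
                # build the full constant in $1 with lui+ori, then apply the R-type op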
                first_pseudo_inst = ['lui', '1', '0xffff']
                opcode = TypeI.getOpcode(first_pseudo_inst[0])
                rs, rt = TypeI.getIRegisters(first_pseudo_inst)
                address = TypeI.getAddress(first_pseudo_inst)

                # Create the object and save it in a list for later access
                bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
                bm.setIsPseudo(True)
                arr_bin_machine.append(bm)
                
                #### advance the line counter for the pseudo-instruction
                line_pos += 1 

                ### Since this is a pseudo-instruction, additional objects must be saved
                second_pseudo_inst = ['ori', '1', '1', instruction[3]]
                opcode = TypeI.getOpcode(second_pseudo_inst[0])
                rs, rt = TypeI.getIRegisters(second_pseudo_inst)
                address = TypeI.getAddress(second_pseudo_inst)

                # Create the object and save it in a list for later access
                bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
                bm.setIsPseudo(True)
                arr_bin_machine.append(bm)

                #### advance the line counter for the pseudo-instruction
                line_pos += 1 

                if(instruction[0] == 'andi'):
                    third_pseudo_inst = ['and', instruction[1], instruction[2], '1']
                    rs, rt, rd = TypeR.getRRegisters(third_pseudo_inst)
                    funct = TypeR.getFunct(third_pseudo_inst[0])
                elif(instruction[0] == 'ori'):
                    third_pseudo_inst = ['or', instruction[1], instruction[2], '1']
                    rs, rt, rd = TypeR.getRRegisters(third_pseudo_inst)
                    funct = TypeR.getFunct(third_pseudo_inst[0])
                elif(instruction[0] == 'xori'):
                    third_pseudo_inst = ['xor', instruction[1], instruction[2], '1']
                    rs, rt, rd = TypeR.getRRegisters(third_pseudo_inst)
                    funct = TypeR.getFunct(third_pseudo_inst[0])

                # Create the object and save it in a list for later access
                bm = BinaryMachineR(line, str(line_pos), '000000', rs, rt, rd, '00000', funct, fake_line)
                arr_bin_machine.append(bm)

            else: 
                opcode = TypeI.getOpcode(instruction[0])
                rs, rt = TypeI.getIRegisters(instruction)
                address = TypeI.getAddress(instruction)

                performance.setTime2(datetime.datetime.now())
                arr_performance_obj.append(performance)

                # Create the object and save it in a list for later access
                bm = BinaryMachineI(line, str(line_pos), opcode, rs, rt, address, fake_line)
                arr_bin_machine.append(bm)
        
        elif instruction[0] == 'clo':
            performance = Performance(instruction[0], 'type R', datetime.datetime.now())

            rs, rt, rd = TypeR.getRRegisters(instruction)
            funct = TypeR.getFunct(instruction[0])

            performance.setTime2(datetime.datetime.now())
            arr_performance_obj.append(performance)

            # Create the object and save it in a list for later access
            bm = BinaryMachineR(line, str(line_pos), '011100', rs, rt, rd, '00000', funct, fake_line)
            arr_bin_machine.append(bm)

        else:
            print("ERROR - INSTRUCTION NOT RECOGNIZED")
            print('------------------')
            print(instruction)
            print('------------------')

        line_pos += 1

    ## Save the .mif file
    Saida.saveFileText(arr_bin_machine, 'saida_text')

    return arr_bin_machine, arr_performance_obj
Code Example #36
# gmail: vevukotic

# learnMNIST.py
# learns a classifier from the MNIST training set
# stored in data/MNISTtrain.pkl

# after each iteration, the model is stored in weights-MNIST.pkl
# be careful, the previous model will be overwritten

# TODO: load previous model, if it exists, before learning
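
# A minimal sketch of that TODO, assuming the weights file is a plain pickle
# (the restore call is hypothetical, not an API confirmed by this project):
#
#     import os, pickle
#     if os.path.exists("weights-MNIST.pkl"):
#         with open("weights-MNIST.pkl", "rb") as f:
#             previous_weights = pickle.load(f)
#         # e.g. convolution01.setWeights(previous_weights["conv01"])  # hypothetical setter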



if __name__ == "__main__":

	d = data()
	images, labels = d.loadData("../data/MNISTtrain.pkl")
	print "Loaded", len(images), "images of shape", images[0].shape

	inputLayer0  = layerFM(1, 32, 32, isInput = True) 
	convLayer1   = layerFM(6, 28, 28)
	poolLayer2   = layerFM(6, 14, 14)
	convLayer3   = layerFM(16, 10, 10)
	poolLayer4   = layerFM(16, 5, 5)
	convLayer5   = layerFM(100, 1, 1)
	hiddenLayer6 = layer1D(80)
	outputLayer7 = layer1D(10, isOutput = True)

	convolution01  = convolutionalConnection(inputLayer0, convLayer1, np.ones([1, 6]), 5, 5, 1, 1)	
	pooling12      = poolingConnection(convLayer1, poolLayer2, 2, 2)
	convolution23  = convolutionalConnection(poolLayer2, convLayer3, np.ones([6, 16]), 5, 5, 1, 1)	
Code Example #37
File: Hades.py Project: dalg24/myFEM
#!/usr/bin/env python

# Code made by Bruno Turcksin
# Interface of the library myFEM

import sys

from data import *
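# Usage (inferred from the argument handling below): python Hades.py <input_file>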

try:
    file_path = sys.argv[1]
    a = data(file_path)
    a.create_mesh()

except IndexError:
    print("You need to give an input file")
except IOError:
    print("Cannot open the input file")