Code Example #1
# assumes: import json, plus the project-local GetData and SortData helpers
def productsKidsRequest():
    oGetData = GetData()
    oUnisportData = oGetData.GetUnisportData()

    oSortData = SortData()
    oSortedData = oSortData.getDictsByStringInKeyValue(oUnisportData, "name", "Børn")

    # paginate the result to only show 10 per page
    iPageVar = 1
    oPage = oSortData.paginateList(oSortedData, iPageVar, 10)

    # convert the list back to JSON
    return json.dumps(oPage)
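These handlers read like Flask view functions whose route decorators were stripped by the listing. A minimal sketch of how the first one might be wired up, assuming a Flask app object and an "/products/kids" route path that are not shown in the original:

# Hypothetical wiring for productsKidsRequest(); the app object, route path,
# and module layout are assumptions, not part of the original snippet.
from flask import Flask

app = Flask(__name__)

@app.route("/products/kids")
def products_kids():
    return productsKidsRequest()

if __name__ == "__main__":
    app.run(debug=True)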
Code Example #2
# assumes a Flask request context: from flask import request; import json
def productCreateRequest():
    oGetData = GetData()
    oUnisportData = oGetData.GetUnisportData()

    if not request.json:
        return "json data not valid"

    # note: Flask's request.json is usually already parsed; json.loads here only
    # works if the client sends a JSON-encoded string as the body
    oNewProduct = json.loads(request.json)
    oManipulateData = ManipulateData()
    oSortedData = oManipulateData.createProduct(oUnisportData, oNewProduct)

    # convert the list back to JSON
    return json.dumps(oSortedData)
Code Example #3
# assumed imports for this snippet: numpy, scikit-learn, and the project-local GetData helper
import numpy as np
from sklearn import preprocessing


def GetTestFeatures():
    Data = GetData()
    TrainSet = Data[12]

    features = []
    for i in TrainSet['summary']:
        if i == "QD":
            # min-max scale the discharge-capacity (QD) series
            d = TrainSet['summary'][i]
            d = d.reshape(-1, 1)
            min_max_scaler = preprocessing.MinMaxScaler()
            x_minmax = min_max_scaler.fit_transform(d)
            features.append(x_minmax.squeeze())

    # stack into a (n_features, n_cycles) array, then transpose to (n_cycles, n_features)
    features = np.array(features)
    print(features.shape)
    features = features.T
    print(features.shape)
    return features
Code Example #4
File: run.py Project: danjamker/N-Fly
    def __init__(self, llwl='Brown', llNL=2, percen=80, NE = True, Col = True, Gram = True, Chu = True):
        '''
        @param llwl: LogLikelihood corpus name ('Brown', 'AmE06', 'BE06')
        @param llNL: LogLikelihood NLength (n-gram length)
        @param percen: percentage of results returned (default 80)
        @param NE: use named-entity extraction, default True
        @param Col: use collocations, default True
        @param Gram: use n-grams, default True
        @param Chu: use chunking, default True
        '''

        self.NEs = NE
        self.Col = Col
        self.Gram = Gram
        self.Chu = Chu
        self.p = percen
        print 'Starting to build ', llwl
        self.LL = LogLikelihood(wordlist=llwl, NLength=llNL)
        print 'LL Loaded'
        self.POS = POS()
        print 'POS Loaded'
        self.GD = GetData()
        print 'GD Loaded'
        self.Cu = Chunker(self.POS)
        print 'Cu Loaded'
        self.FL = Filter()
        print 'FL Loaded'
        self.CC = Collocation(self.POS)
        print 'CC Loaded'
        self.Ng = NGram()
        print 'Ng Loaded'
        self.S = Select(percentil=self.p)
        print 'S Loaded'
        self.To = Tokenize(self.FL)
        print 'To Loaded'
Code Example #5
def productsRequest():
    oGetData = GetData()
    oUnisportData = oGetData.GetUnisportData()

    oSortData = SortData()
    oSortedData = oSortData.sortByPrice(oUnisportData, False)

    iPageVar = str(request.args.get('page'))
    # fall back to page 1 if the page value is not purely numeric
    if not iPageVar.isdigit():
        iPageVar = "1"
    # paginate the result to only show 10 per page
    oPage = oSortData.paginateList(oSortedData, iPageVar, 10)

    # oPage = paginate.Page(oSortedData, page=iPageVar, items_per_page=10)
    # convert the list back to JSON
    return json.dumps(oPage)
Code Example #6
File: run.py Project: danjamker/N-Fly
    def __init__(self,
                 llwl='Brown',
                 llNL=2,
                 percen=80,
                 NE=True,
                 Col=True,
                 Gram=True,
                 Chu=True):
        '''
        @param llwl: LogLikelihood corpus name ('Brown', 'AmE06', 'BE06')
        @param llNL: LogLikelihood NLength (n-gram length)
        @param percen: percentage of results returned (default 80)
        @param NE: use named-entity extraction, default True
        @param Col: use collocations, default True
        @param Gram: use n-grams, default True
        @param Chu: use chunking, default True
        '''

        self.NEs = NE
        self.Col = Col
        self.Gram = Gram
        self.Chu = Chu
        self.p = percen
        print 'Starting to build ', llwl
        self.LL = LogLikelihood(wordlist=llwl, NLength=llNL)
        print 'LL Loaded'
        self.POS = POS()
        print 'POS Loaded'
        self.GD = GetData()
        print 'GD Loaded'
        self.Cu = Chunker(self.POS)
        print 'Cu Loaded'
        self.FL = Filter()
        print 'FL Loaded'
        self.CC = Collocation(self.POS)
        print 'CC Loaded'
        self.Ng = NGram()
        print 'Ng Loaded'
        self.S = Select(percentil=self.p)
        print 'S Loaded'
        self.To = Tokenize(self.FL)
        print 'To Loaded'
Code Example #7
    def request_handler(self, type):
        if type == 'GET':
            path = urlparse.urlparse(self.path)
            isparam = re.search(r'\?', self.path)  # raw string avoids an invalid-escape warning
            querystr = path.query
            query = {}
            if isparam:
                # parse the query string into a dict of key/value pairs
                for q in querystr.split('&'):
                    key = q.split('=')[0]
                    value = q.split('=')[1]
                    query[key] = value
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                e = GetData()
                filejson = e.loadfilterdata(query, ServConf().initconf())
                self.wfile.write(filejson)
            else:
                self.send_error(400)
Code Example #8
File: main.py Project: hansputera/zhycorppy
  def __init__(self, botID: str):
    # author and version are assumed to be module-level constants in the package
    self.author = author
    self.version = version

    # a valid bot ID is exactly 18 characters long
    if len(botID) != 18:
      print(f"{botID} invalid")
    else:
      self.botID = botID
      self.data = GetData(botID)

  def getData(self):
    return self.data
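A hypothetical usage sketch for the constructor above; the class name Zhycorp and the 18-character ID are placeholders, since the excerpt only shows the methods, not the class statement:

# Hypothetical usage; the class name "Zhycorp" and the ID value below are
# assumptions for illustration only.
bot = Zhycorp("123456789012345678")
print(bot.getData())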
Code Example #9
# assumed context for this snippet: keras's load_model, numpy as np,
# matplotlib.pyplot as plt, and the project helpers GetFeatures, BuildSet,
# GetData plus a module-level `step` constant
def test(f, idx=14, filename="m1.h5"):
    model = load_model(filename)
    model.summary()
    features = GetFeatures(idx, f)
    testX, testY = BuildSet(features, step)
    testX = np.array(testX)
    testY = np.array(testY)
    testX = testX.reshape(testX.shape[0], step, -1)

    ans = model.predict(testX)

    plt.figure(1)
    Data = GetData()
    TrainSet = Data[idx]
    capacity = []
    IR = []
    for i in TrainSet['summary']:
        if i == "QD":
            capacity = TrainSet['summary'][i]
        if i == "IR":
            IR = TrainSet['summary'][i]

    # plt.subplot(311)
    # plt.plot(capacity,label="QD")
    # plt.xlabel("Cycle")
    # plt.ylabel("Discharge Capacity (Ah)")
    # plt.legend()

    # plt.subplot(312)
    # plt.plot(IR,label="IR")
    # plt.xlabel("Cycle")
    # plt.ylabel("Internal Resistance (Ohm)")
    # plt.legend()

    # plt.subplot(313)
    testY *= len(capacity)
    delta = []
    for i in range(len(ans)):
        ans[i] = i / (1 - ans[i]) * ans[i]
        delta.append(ans[i] - testY[i])
    x = np.arange(step, len(capacity) + 1)
    # plt.plot(x,ans,label="Remain Cycles Predict")
    plt.plot(x, delta)

    plt.xlabel("Last cycle")
    plt.ylabel("Cycle Error")
    plt.ylim((-100, 100))
    plt.legend()
    plt.show()
Code Example #10
File: Bot.py Project: peytonbair/tradebot
class Bot:
    def __init__(self, pair, interval):
        self.pair = pair
        self.interval = interval
        self.df = GetData(pair, interval)
        self.df = self.df.getData()

    def run(self):
        pair = self.pair
        interval = self.interval
        df = self.df
        strat = Strategy(df, pair)
        strat.run()  # in test mode this runs over the entire day; in live mode it trades live
        strat.output(True)
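A minimal usage sketch for the Bot class above; the trading pair and interval strings are illustrative guesses, since their expected format depends on what GetData and Strategy accept:

# Hypothetical usage of Bot; "BTCUSDT" and "1h" are placeholder values.
bot = Bot("BTCUSDT", "1h")
bot.run()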
Code Example #11
def main():
    Data = GetData()
    Data = AddPos(Data)
    AddDate(Data)

    # the number of tweets per date
    a = Sort_Dict(Counter(Data.date))
    plt.plot([i[0] for i in a], [i[1] for i in a])

    AddUserName(Data)
    move_list = GetAllMove_FasterVersion(Data)
    day_m = AggMoveListByTime(move_list, way='day')
    week_m = AggMoveListByTime(move_list, way='week')
    month_m = AggMoveListByTime(move_list, way='month')
    Save_Obj(day_m, './Data/day_move')
    Save_Obj(week_m, './Data/week_move')
    Save_Obj(month_m, './Data/month_move')
Code Example #12
def submit():
    global hsl
    # map the selected courier option to its name
    # (note: hsl stays unset if ekspedisi.get() is neither 1 nor 2)
    if ekspedisi.get() == 1:
        hsl = "JNE"
    if ekspedisi.get() == 2:
        hsl = "JNT"

    if ((input1.get() != "") and (input2.get() != "") and (input3.get() != "")
            and (drop.get() != "")):
        dataTable.append(
            GetData(input1.get(), input2.get(), input3.get(), drop.get(),
                    list(check.hasil()), hsl))
        messagebox.showinfo("", "Data Berhasil Dimasukan")  # "Data entered successfully"
    else:
        messagebox.showwarning("", "Data tidak lengkap")  # "Data is incomplete"

    input1.delete(0, END)
    input2.delete(0, END)
    input3.delete(0, END)
    drop.delete(0, END)
Code Example #13
def main():
    #import data
    test_data = GetData(TEST_DIR)
    print(test_data.source_list)
    

    with tf.name_scope('inputs'):
        # create the model
        x = tf.placeholder(tf.float32, [Batch_SIZE, Img_depth, Img_rows, Img_cols, 1], name='x_input')

        # Define loss and optimizer
        y_ = tf.placeholder(tf.int16, [Batch_SIZE, Img_depth, Img_rows, Img_cols, n_class], name='y__input')

    # define a global step
    global_step = tf.Variable(0, name="global_step")

    # Build the graph for the deep net
    network, outputs = network(x)

    dice_loss = dice_coef_loss(outputs, y_)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(1e-5).minimize(dice_loss)

    # add ops to save and restore all the variables
    saver = tf.train.Saver()
    
    
    # let TensorFlow allocate GPU memory as needed instead of grabbing it all up front
    m_config = tf.ConfigProto()
    m_config.gpu_options.allow_growth = True

    with tf.Session(config=m_config) as sess:

        
#        sess.run(tf.global_variables_initializer())    # keep this commented out when resuming training from a checkpoint

        # these checkpoint-restore lines should be commented out when training from scratch
        check_points_list = tf.train.latest_checkpoint(LOG_DIR)   # filename of the latest checkpoint
        print(len(check_points_list))
        print(check_points_list)  # the name of this checkpoint
        saver.restore(sess, check_points_list)
        
        global_step_value = sess.run(global_step)
        print("Last iteration:",global_step_value)
        for i in range(global_step_value+1,116001+1): # here we only want the for loop to run through once
            last_point=0
            
            for p in range(73):  # determined by the number of patches cut from the original NII image
                print(p)
                images_test=test_data.next_batch_order_2(Batch_SIZE,"mr_train_1019_boundingBox.nii.gz",64,16,last_point)
                dp_dict = tl.utils.dict_to_one(network.all_drop)  # disable noise (dropout) layers when testing
                feed_dict_test = {x: images_test}
                feed_dict_test.update(dp_dict)

                output_image = sess.run(outputs,feed_dict=feed_dict_test)  #use the test next_batch
#                output_image = outputs.eval(feed_dict=feed_dict_test)
                print(type(output_image))
                print(np.shape(output_image))
#                output_image = np.asarray(output_image)
#                output_image= outputs.eval(feed_dict={x:images})
                for j in range(last_point,last_point+Batch_SIZE):

                    
                    input_Image=images_test[...,0]
                    
                    LVB = output_image[...,0]
                    out_LVB = LVB[j-last_point,...]
                    RVB = output_image[...,1]
                    out_RVB = RVB[j-last_point,...]
                    LAB = output_image[...,2]
                    out_LAB = LAB[j-last_point,...]
                    RAB = output_image[...,3]
                    out_RAB = RAB[j-last_point,...]
                    MLV = output_image[...,4]
                    out_MLV = MLV[j-last_point,...]
                    AA  = output_image[...,5]
                    out_AA = AA[j-last_point,...]
                    PA  = output_image[...,6]
                    out_PA = PA[j-last_point,...]
                    BACK = output_image[...,7]
                    out_BACK = BACK[j-last_point,...]
                    
                    CreatNii_save(out_LVB,save_dir,"out_LVB" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    CreatNii_save(out_RVB,save_dir,"out_RVB" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    CreatNii_save(out_LAB,save_dir,"out_LAB" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    CreatNii_save(out_RAB,save_dir,"out_RAB" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    CreatNii_save(out_MLV,save_dir,"out_MLV" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    CreatNii_save(out_AA,save_dir,"out_AA" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    CreatNii_save(out_PA,save_dir,"out_PA" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    CreatNii_save(out_BACK,save_dir,"out_BACK" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))
                    
                    CreatNii_save(input_Image[j-last_point,...],save_dir,"Input_Test_Image" +str(i)+"_"+str(j)+ ".nii.gz",np.eye(4))

                    
                last_point = last_point+Batch_SIZE
Code Example #14
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

import datetime
from GetData import GetData
from skfeature.utility.construct_W import construct_W
from skfeature.function.similarity_based import lap_score
from skfeature.function.sparse_learning_based import MCFS
from EntropyBasedFeatureRanking import EntropyBasedFeatureRanking
from skfeature.function.similarity_based import SPEC

# initialization
methodType = 0
dataSet = 0
data = GetData(dataSet)
print "Data Preparation finished."

timeStart = datetime.datetime.now()

# feature selection
if methodType == 0:
    # Laplacian Score
    kwargs_W = {
        "metric": "euclidean",
        "neighbor_mode": "knn",
        "weight_mode": "heat_kernel",
        "k": 5,
        "t": 1
    }
    W = construct_W(data, **kwargs_W)
    result = lap_score.lap_score(data, W=W)
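The Laplacian-score branch ends with raw per-feature scores. If a ranking step follows in the full script, skfeature's lap_score module also provides a feature_ranking helper; the lines below are a sketch of that assumed step, not part of the original excerpt:

# Assumed follow-up step (not shown in the excerpt): rank features so that
# the smallest Laplacian scores, i.e. the most relevant features, come first.
idx = lap_score.feature_ranking(result)
print "Top 10 features:", idx[:10]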
Code Example #15
Last edited : 4:20pm 5/18/2020
"""

from GetData import GetData
from Utility import Util
from Transform import Transform
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import cross_val_score

#read config
config = Util.read_config()
#get data
df = GetData.read_csv(GetData, config['data']['FuelConsumption']['filepath'])
#get all numeric columns
cols_numeric = df.select_dtypes([np.number]).columns
#remove non-numeric columns
df = df[cols_numeric]
#find correlations between variables
corrMatrix = df.corr()
mask = np.zeros_like(corrMatrix)
mask[np.triu_indices_from(mask)] = True
with sn.axes_style("white"):
    f, ax = plt.subplots(figsize=(7, 5))
    sn.heatmap(corrMatrix,
               vmin=-1,
               vmax=1,
               mask=mask,
               annot=True,
Code Example #16
File: Main.py Project: xinsicheng/DemaciaProject
def main():
	api = RiotAPI("RGAPI-ac961b5c-4740-4fd0-9f9b-585ec0b78924")
	gets = GetData(api)
	gets.run()
Code Example #17
File: run.py Project: danjamker/N-Fly
class runable(object):
    '''
    Class for selecting and extracting keywords from online content.
    '''

    def __init__(self, llwl='Brown', llNL=2, percen=80, NE = True, Col = True, Gram = True, Chu = True):
        '''
        @param llwl: LogLikelihood corpus name ('Brown', 'AmE06', 'BE06')
        @param llNL: LogLikelihood NLength (n-gram length)
        @param percen: percentage of results returned (default 80)
        @param NE: use named-entity extraction, default True
        @param Col: use collocations, default True
        @param Gram: use n-grams, default True
        @param Chu: use chunking, default True
        '''

        self.NEs = NE
        self.Col = Col
        self.Gram = Gram
        self.Chu = Chu
        self.p = percen
        print 'Starting to build ', llwl
        self.LL = LogLikelihood(wordlist=llwl, NLength=llNL)
        print 'LL Loaded'
        self.POS = POS()
        print 'POS Loaded'
        self.GD = GetData()
        print 'GD Loaded'
        self.Cu = Chunker(self.POS)
        print 'Cu Loaded'
        self.FL = Filter()
        print 'FL Loaded'
        self.CC = Collocation(self.POS)
        print 'CC Loaded'
        self.Ng = NGram()
        print 'Ng Loaded'
        self.S = Select(percentil=self.p)
        print 'S Loaded'
        self.To = Tokenize(self.FL)
        print 'To Loaded'
    
    def Select(self, url, depth): 
        '''
        Determine the best keywords for a webpage.

        @param url: the base URL to start sampling from
        @param depth: the depth of the website to be sampled

        @return: the list of selected keywords, ordered with the highest-rated words towards the lower bound of the array.
        '''
        #Get data from web page
        text = self.GD.getWebPage(url, depth)

        # Tokenize sentences and words
        tok = self.To.Tok(text)

        #POS tag the text
        pos = self.POS.POSTag(tok, 'tok')

        # Log likelihood
        log = self.LL.calcualte(tok)
 
        #Collocations
        if self.Col == True:
            col = self.CC.col(pos, tok)
        else:
            col = []

        #NE Extraction
        if self.NEs == True:
            ne = self.Cu.Chunks(pos, nodes=['PERSON', 'ORGANIZATION', 'LOCATION'])
        else:
            ne = []
         
        #Extract NP
        if self.Chu == True:
            chu = [self.Cu.parse(p) for p in pos]
        else:
            chu = []
        
        # Create n-grams
        if self.Gram == True:
            ga = self.Ng.Grams(pos, n=6)
        else:
            ga = []
        
        return self.S.keywords(ne, ga , col , chu, log)
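A short usage sketch for the runable class above, matching the snippet's Python 2 style; the URL and crawl depth are placeholders, and the constructor arguments simply restate the documented defaults:

# Hypothetical usage; 'http://example.com' and depth=1 are placeholders.
r = runable(llwl='Brown', percen=80)
keywords = r.Select('http://example.com', 1)
print keywords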
Code Example #18
File: Bot.py Project: peytonbair/tradebot
 def __init__(self, pair, interval):
     self.pair = pair
     self.interval = interval
     self.df = GetData(pair, interval)
     self.df = self.df.getData()
Code Example #19
from GetLinks import GetLinks
from GetData import GetData
from Register import Register
from GetContacts import GetContacts
from GetUniqueEmailPatterns import GetUniqueEmailPatterns
from GenerateEmails import GenerateEmails

if __name__ == '__main__':
    GetLinks.get_links()
    GetData().run()
    Register.register()
    GetContacts().run(4)
    print(GetUniqueEmailPatterns.get_unique_email_patterns())
    input(
        'Now you should write code for found email patterns. Enter when ready.'
    )
    GenerateEmails().run()
Code Example #20
File: run.py Project: danjamker/N-Fly
class runable(object):
    '''
    Class for selecting and extracting keywords from online content.
    '''
    def __init__(self,
                 llwl='Brown',
                 llNL=2,
                 percen=80,
                 NE=True,
                 Col=True,
                 Gram=True,
                 Chu=True):
        '''
        @param llwl: LogLikelihood corpus name ('Brown', 'AmE06', 'BE06')
        @param llNL: LogLikelihood NLength (n-gram length)
        @param percen: percentage of results returned (default 80)
        @param NE: use named-entity extraction, default True
        @param Col: use collocations, default True
        @param Gram: use n-grams, default True
        @param Chu: use chunking, default True
        '''

        self.NEs = NE
        self.Col = Col
        self.Gram = Gram
        self.Chu = Chu
        self.p = percen
        print 'Starting to build ', llwl
        self.LL = LogLikelihood(wordlist=llwl, NLength=llNL)
        print 'LL Loaded'
        self.POS = POS()
        print 'POS Loaded'
        self.GD = GetData()
        print 'GD Loaded'
        self.Cu = Chunker(self.POS)
        print 'Cu Loaded'
        self.FL = Filter()
        print 'FL Loaded'
        self.CC = Collocation(self.POS)
        print 'CC Loaded'
        self.Ng = NGram()
        print 'Ng Loaded'
        self.S = Select(percentil=self.p)
        print 'S Loaded'
        self.To = Tokenize(self.FL)
        print 'To Loaded'

    def Select(self, url, depth):
        '''
        Determine the best keywords for a webpage.

        @param url: the base URL to start sampling from
        @param depth: the depth of the website to be sampled

        @return: the list of selected keywords, ordered with the highest-rated words towards the lower bound of the array.
        '''
        #Get data from web page
        text = self.GD.getWebPage(url, depth)

        # Tokenize sentences and words
        tok = self.To.Tok(text)

        #POS tag the text
        pos = self.POS.POSTag(tok, 'tok')

        # Log likelihood
        log = self.LL.calcualte(tok)

        #Collocations
        if self.Col == True:
            col = self.CC.col(pos, tok)
        else:
            col = []

        #NE Extraction
        if self.NEs == True:
            ne = self.Cu.Chunks(pos,
                                nodes=['PERSON', 'ORGANIZATION', 'LOCATION'])
        else:
            ne = []

        #Extract NP
        if self.Chu == True:
            chu = [self.Cu.parse(p) for p in pos]
        else:
            chu = []

        # Create n-grams
        if self.Gram == True:
            ga = self.Ng.Grams(pos, n=6)
        else:
            ga = []

        return self.S.keywords(ne, ga, col, chu, log)
Code Example #21
File: DealWithData.py Project: NickYangT/MM_data
 def __init__(self, code):
     self.dicts = GetData(code).request_data()
     print self.dicts
Code Example #22
def main():
    #import data
    training_data = GetData(TRAINING_DIR)
    test_data = GetData(TEST_DIR)

    with tf.name_scope('inputs'):
        #create the model
        x = tf.placeholder(tf.float32,
                           [Batch_SIZE, Img_depth, Img_rows, Img_cols, 1],
                           name='x_input')

        # Define loss and optimizer
        y_ = tf.placeholder(
            tf.int16, [Batch_SIZE, Img_depth, Img_rows, Img_cols, n_class],
            name='y__input')

    #define a global step
    global_step = tf.Variable(0, name="global_step")

    # Build the graph for the deep net
    network, outputs = network(x)

    dice_loss = dice_coef_loss(outputs, y_)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(1e-5).minimize(dice_loss)

    #add ops to save and restore all the variables
    saver = tf.train.Saver()

    training_summary = tf.summary.scalar("training_loss", dice_loss)
    validation_summary = tf.summary.scalar("validation_loss", dice_loss)

    # let TensorFlow allocate GPU memory as needed instead of grabbing it all up front
    m_config = tf.ConfigProto()
    m_config.gpu_options.allow_growth = True

    with tf.Session(config=m_config) as sess:

        summary_writer = tf.summary.FileWriter("log/", sess.graph)

        sess.run(tf.global_variables_initializer(
        ))  # comment this line out when resuming training from a checkpoint

        # these restore lines are commented out for training from scratch; uncomment them to resume from the latest checkpoint
        #        check_points_list = tf.train.latest_checkpoint(LOG_DIR)   # filename of the latest checkpoint
        #        print(len(check_points_list))
        #        print(check_points_list)  # the name of this checkpoint
        #        saver.restore(sess,check_points_list)

        global_step_value = sess.run(global_step)
        print("Last iteration:", global_step_value)
        for i in range(global_step_value + 1, 150000 + 1):
            images, labels = training_data.next_batch(Batch_SIZE)
            feed_dict_train = {x: images, y_: labels}
            feed_dict_train.update(network.all_drop)  #enable noise layers
            train_step.run(feed_dict=feed_dict_train)

            if i % 50 == 0:
                print("iteration now:", i)
                train_loss, train_summ = sess.run(
                    [dice_loss, training_summary], feed_dict=feed_dict_train)
                summary_writer.add_summary(train_summ, i)
                print('train loss %g' % train_loss)

                images_test, labels_test = test_data.next_batch(Batch_SIZE)
                dp_dict = tl.utils.dict_to_one(
                    network.all_drop)  # disable noise (dropout) layers when testing
                feed_dict_test = {x: images_test, y_: labels_test}
                feed_dict_test.update(dp_dict)
                #                loss = dice_loss.eval(feed_dict=feed_dict)
                valid_loss, valid_summ = sess.run(
                    [dice_loss, validation_summary], feed_dict=feed_dict_test)
                summary_writer.add_summary(valid_summ, i)
                print('test loss %g' % valid_loss)
                print('----------------------------------')
            if i % 5000 == 0:
                print("iteration now:", i)

                output_image = sess.run(
                    outputs,
                    feed_dict=feed_dict_test)  #use the test next_batch
                #                output_image = outputs.eval(feed_dict=feed_dict_test)
                print(type(output_image))
                print(np.shape(output_image))
                #                output_image = np.asarray(output_image)
                #                output_image= outputs.eval(feed_dict={x:images})
                for j in range(Batch_SIZE):

                    labels_test_union = labels_test[
                        ...,
                        0] * 500 + labels_test[..., 1] * 600 + labels_test[
                            ...,
                            2] * 420 + labels_test[..., 3] * 550 + labels_test[
                                ..., 4] * 205 + labels_test[
                                    ..., 5] * 820 + labels_test[..., 6] * 850
                    input_Image = images_test[..., 0]

                    LVB = output_image[..., 0]
                    out_LVB = LVB[j, ...]
                    RVB = output_image[..., 1]
                    out_RVB = RVB[j, ...]
                    LAB = output_image[..., 2]
                    out_LAB = LAB[j, ...]
                    RAB = output_image[..., 3]
                    out_RAB = RAB[j, ...]
                    MLV = output_image[..., 4]
                    out_MLV = MLV[j, ...]
                    AA = output_image[..., 5]
                    out_AA = AA[j, ...]
                    PA = output_image[..., 6]
                    out_PA = PA[j, ...]
                    BACK = output_image[..., 7]
                    out_BACK = BACK[j, ...]
                    # save each heart structure's label separately to inspect the results
                    CreatNii_save(
                        out_LVB, save_dir,
                        "out_LVB" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))
                    CreatNii_save(
                        out_RVB, save_dir,
                        "out_RVB" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))
                    CreatNii_save(
                        out_LAB, save_dir,
                        "out_LAB" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))
                    CreatNii_save(
                        out_RAB, save_dir,
                        "out_RAB" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))
                    CreatNii_save(
                        out_MLV, save_dir,
                        "out_MLV" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))
                    CreatNii_save(out_AA, save_dir,
                                  "out_AA" + str(i) + "_" + str(j) + ".nii.gz",
                                  np.eye(4))
                    CreatNii_save(out_PA, save_dir,
                                  "out_PA" + str(i) + "_" + str(j) + ".nii.gz",
                                  np.eye(4))
                    CreatNii_save(
                        out_BACK, save_dir,
                        "out_BACK" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))

                    CreatNii_save(
                        input_Image[j, ...], save_dir,
                        "Input_Test_Image" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))
                    CreatNii_save(
                        (labels_test_union[j,
                                           ...]).astype(np.float32), save_dir,
                        "Test_Label" + str(i) + "_" + str(j) + ".nii.gz",
                        np.eye(4))

            if i % 1000 == 0:
                print("iteration now:", i)
                # note: global_step.assign() does not change the value of global_step by itself; it only creates an op, and global_step is actually assigned only after that op is run
                global_step_op = global_step.assign(
                    i
                )  #this line is necessary, if not the iteration number is always 0
                print("global_step_value:", sess.run(global_step_op))
                saver.save(
                    sess, CHECKPOINT_FL, global_step=i
                )  #the "global_step" here is different from the one above
                print("================================")
                print("model is saved")
Code Example #23
'''
Created on Apr 22, 2017

@author: davidryv
'''
from GetData import GetData as ReadData

if __name__ == '__main__':

    GetUsers = ReadData()

    userList = GetUsers.getUserList()

    spaces = GetUsers.getSpaces()

    for space in spaces:
        print 'Deleting ' + space['wiki_name']
        users, role, id = GetUsers.getUsers(space['name'])
        for user in users:
            if user['login'].encode('utf-8') in userList:

                print user['login'].encode(
                    'utf-8') + " , Deleted from " + space['wiki_name'].encode(
                        'utf-8')
                GetUsers.DeleteUsersList(user['id'].encode('utf-8'),
                                         user['login'].encode('utf-8'), role,
                                         space['wiki_name'].encode('utf-8'))

            else:
                print '-----------Not deleted-----------'
                print user['login'].encode(