Example #1
def FeatureExtraction(Sample,Feature,Data_Array):
    # Append the data
    Data_Array = np.append(Data_Array,Sample,axis=0)
           
    # Common Average Reference
    Sample[:,range(1,9)] = Process.Common_Average_Reference(Sample[:,range(1,9)])

    # Feature Extraction
    if ALPHA:
        Feature = np.append(Feature,Process.AlphaDifference(Sample,([5,6],[7,8]),250))
    else:
        Feature = np.append(Feature,Process.PowerExtraction(Sample[:,3],[8,12],250))

    return (Feature,Data_Array)
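A minimal usage sketch for the function above (the Process helpers, the ALPHA flag, and the 9-column sample layout are assumptions carried over from this project's other snippets):

import numpy as np

Feature = np.array([])         # features accumulate across calls
Data_Array = np.empty((0, 9))  # raw samples: timestamp + 8 channels (assumed layout)

Sample = np.random.rand(250, 9)  # one second of simulated data at 250 Hz
Feature, Data_Array = FeatureExtraction(Sample, Feature, Data_Array)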
Example #2
def main():
    trainData = pd.read_csv('CrimeClassification/Dataset/train-2.csv')
    classesMap = dm.mapClasses(trainData)
    print(trainData.info())
    print(classesMap)
    cleanedTrainData,normalizationValues = dm.cleanTrainData(trainData,classesMap)
    print(cleanedTrainData.info())
    Xtrain, Ytrain, Xtest, Ytest = splitData(cleanedTrainData.values)
    model = trainModel(Xtrain,Ytrain)
    Ypred = testModel(model,Xtest)
    confMatrix = da.confusionMatrix(Ypred,Ytest)
    titleCM = da.orderClassesMapKeys(classesMap)
    da.plotConfusionMatrix(confMatrix,titleCM)
    print(da.f1Score(Ypred, Ytest))
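splitData, trainModel and testModel are not shown in this snippet; a plausible sketch of splitData, under the assumption that the last column of the cleaned data holds the class label:

import numpy as np

def splitData(values, train_frac=0.8):
    # shuffle rows, then split features/labels into train and test partitions
    np.random.shuffle(values)
    cut = int(len(values) * train_frac)
    X, Y = values[:, :-1], values[:, -1]
    return [X[:cut], Y[:cut], X[cut:], Y[cut:]]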
Example #3
def saveTrial(trialDataArray, previousITI):

    global trialsSaved

    #If we stop the session prematurely, we need to know how many trials are in the saved session
    trialsSaved += 1

    #Convert trial data from numpy array to python list
    trialDataList = trialDataArray.tolist()

    #Create new trial (singular) object
    trialObject = {
        "trialNumber": trialsSaved,
        "previousITI": previousITI,
        "stats": da.getTrialStats(ts.currentSession.minVoltage,
                                  ts.currentSession.thresholdSD,
                                  ts.currentSession.thresholdMinDuration),
        "samples": trialDataList
    }

    #print(str(trialObject["previousITI"]))
    #Append trial (singular) object to trials (plural) array
    jsonObject["trials"].append(trialObject)
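saveTrial relies on a module-level jsonObject accumulator that is not shown; a minimal sketch of how it might be initialized and flushed to disk (these names are assumptions, not the project's confirmed API):

import json

trialsSaved = 0
jsonObject = {"trials": []}  # saveTrial appends one trial object per call

def saveSession(path):
    # write everything collected so far to disk
    with open(path, "w") as f:
        json.dump(jsonObject, f, indent=2)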
Example #4
 def plotPolyRegression(self):
     x = str(self.dialog.featureX.currentText())
     y = str(self.dialog.featureY.currentText())
     order = int(self.dialog.order.currentText())
     descriptions = getDescriptions()
     if (x not in descriptions) or (y not in descriptions):
         self.errorLabel.setText("One or more feature names were invalid")
         self.dialog.close()
         self.dialog = None
         return
     try:
         self.coefs = da.polynomialRegression(x, y, order)
     except Exception:
         self.errorLabel.setText(
             "An error occurred while loading or analyzing the data.")
         self.dialog.close()
         self.dialog = None
         return
     if (type(self.coefs) == str):
         self.errorLabel.setText(self.coefs)
         self.dialog.close()
         self.dialog = None
     else:
         self.mode_linear = False
         self.mode_poly = True
         self.data = self.coefs[0]
         self.errorLabel.setText("")
         self.sc.update_figure(self.coefs, x, y, Poly=True)
         self.updateDataDisplay()
         self.dialog.close()
         self.dialog = None
Example #5
 def post(self):
     try:
         data = json_decode(self.request.body)
         # print(data)
         da = DataAnalysis.Vectorization('D:\\VSCodeTest\\Show\\static')
         # da = DataAnalysis.Vectorization('C:\\Users\\Administrator\\Desktop\\Dioxin_tornado_80\\static')
         format_matrix = da.get_format_matrix(data['key_string'])
         sim_dict = da.get_similarity_vector(format_matrix)
         case_description = da.get_case(sim_dict['max_sim'])
         img_dict = da.get_img(sim_dict['max_sim'])
         suggest_dict = da.get_suggest(sim_dict['max_sim'])
         order_str = da.get_order()
         format_dict = self.formatmatrix_to_dict(format_matrix)
         json_str = json.dumps({
             'format_dict': format_dict,
             'sim_dict': sim_dict,
             'case_description': case_description,
             'img_dict': img_dict,
             'suggest_dict': suggest_dict,
             'order_str': order_str,
             'status_code': str(200),
             'status_msg': '(^_^)'
         })
         time.sleep(1)
         assert len(data['key_string']) >= 50
         self.write(json_str)
     except Exception as e:
         print(e)
         rt_dict = {
             'status_code': str(404),
             'status_msg': 'AjaxHandler Post Error'
         }
         self.write(json.dumps(rt_dict))
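A hedged client-side sketch of calling this handler (the URL and route are assumptions; note the handler itself asserts that key_string is at least 50 characters long):

import json
import requests

# hypothetical endpoint for the handler above
resp = requests.post("http://localhost:8888/ajax",
                     data=json.dumps({"key_string": "x" * 60}))
print(resp.json()["status_code"])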
Example #6
def buildMLModel(nameCSV):
    X_full = da.readCSV(nameCSV)

    y = X_full.Diabetes
    X = X_full.drop(['Diabetes'], axis=1)

    beside_list = ['Pregnancies']

    #ca.null_to_NaN(X, beside_list)
    # si=SimpleImputer(strategy='most_frequent')
    # si.fit(X,y)

    X_train, X_valid, y_train, y_valid = train_test_split(X,
                                                          y,
                                                          train_size=0.9,
                                                          test_size=0.1,
                                                          random_state=1)

    # results = {}
    # for i in range(1, 10):
    #     results[25 * i] = get_score(25 * i,  X_train, y_train)
    # n_estimators_best = min(results, key=results.get)
    # print(n_estimators_best)
    n_estimators_best = 70
    print(get_score(n_estimators_best, X_train, y_train, X_valid, y_valid))
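get_score is not defined in this snippet (the commented-out loop even calls it with fewer arguments); a plausible sketch matching the live five-argument call, assuming a random-forest model scored by mean absolute error:

from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

def get_score(n_estimators, X_train, y_train, X_valid, y_valid):
    # fit a forest of the requested size and score it on the held-out split
    model = RandomForestRegressor(n_estimators=n_estimators, random_state=0)
    model.fit(X_train, y_train)
    return mean_absolute_error(y_valid, model.predict(X_valid))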
Example #7
 def testhub(self):
     data = open('data_test.json').read()
     data = json.loads(data)
     result = DataAnalysis.findHub(data)
     print(result)
     self.assertEqual(result[0][0], 'Olivia Colman')
     self.assertEqual(result[0][1], 11)
Example #8
    def __init__(self, master=None):
        """
        Constructor of the Window object
        """

        # Initialize the database
        cwd = os.path.dirname(os.path.realpath(__file__))
        pathfile = os.path.join(cwd, '_path_to_folders')
        if not os.path.exists(pathfile):
            path = tkFileDialog.askdirectory()
            with open(pathfile, 'w') as f:
                f.write(path)
        else:
            with open(pathfile, 'r') as f:
                for l in f:
                    path = l.strip()
        self.qd = da.DataAnalysis(path=path)
        self.qd.slct_tags = tag_list

        #Initialize the filter dictionaries
        self.include = {}  # dictionary storing included VARs
        self.exclude = {}  # dictionary storing excluded VARs
        self.include_boxes = {}  # dictionary storing include buttons
        self.exclude_boxes = {}  # dictionary storing exclude buttons
        self.subwindows = []  # list of filter result windows
        for tag in self.qd.slct_tags:
            self.include[tag] = []
            self.exclude[tag] = []
            self.include_boxes[tag] = []
            self.exclude_boxes[tag] = []

        # Initialize the frame and master
        Frame.__init__(self, master)
        self.master = master

        # Create a frame for the Chapter selection
        self.chp_frame = Frame(master)
        self.chp_frame.pack(fill=X)

        # Create a frame for key selections
        self.tags_frame = Frame(master)
        self.mycanvas1 = MyCanvas(self.tags_frame, 320)
        self.tags_frame.pack(fill=X)

        # Create a frame for score range entries
        self.score_frame = Frame(master)
        self.score_frame.pack(fill=BOTH, expand=True, anchor=W)

        # Create a frame for the Display messages
        self.display_frame = Frame(master)
        self.mycanvas2 = MyCanvas(self.display_frame, 100, 'lightgreen')
        self.display_frame.pack(fill=BOTH, anchor=W, expand=True)

        # Create a frame for control buttons
        self.button_frame = Frame(master)
        self.button_frame.pack(fill=BOTH, expand=True, anchor=W)

        # Initialize the window
        self.init_window()
Example #9
    def initialize_parameters_from_config_file(self, dir, config):
        # TODO: Should I access the config directly? Or store variables here.
        self.config = config

        # Variables for storing data
        self.BEST_INDIVIDUAL_FILE = "elite"
        self.POPULATION_FILE = "pop"
        self.SAVE_FILE_DIRECTORY = os.path.join(dir, 's_')
        self.CHECKPOINT_FREQUENCY = int(
            config['experiment']['checkpoint_frequency'])

        # Keeping track of evolutionary progression
        self.EVALUATION_NR = 0
        self.POPULATION_SIZE = int(config['ea']['batch_size'])

        # Mutation rates
        self.MUTATION_RATE = float(config['ea']['mutation_prob'])
        self.MORPH_MUTATION_RATE = float(config['ea']['morphmutation_prob'])
        self.MUT_SIGMA = float(config['ea']['mutation_sigma'])
        self.TREE_DEPTH = int(config['morphology']['max_depth'])

        #
        print("Mutation rates - ", " control: ", self.MUTATION_RATE,
              ", morphology: ", self.MORPH_MUTATION_RATE, ", sigma: ",
              self.MUT_SIGMA)

        # Wall of death speed
        self.WOD_SPEED = float(config['evaluation']['wod_speed'])

        # This parameter is used for showing the best individual every generation.
        # NOTE: this apparently doesn't work when the rest of the simulation runs headless.
        self.show_best = int(config['ea']['show_best']) == 1
        self.headless = int(config['ea']['headless']) == 1
        self.load_best = int(config['ea']['load_best']) == 1
        # plot the virtual creatures every <interval> frames
        self.interval = int(config['ea']['interval'])

        # Elements for visualization
        # plot fitness over time
        self.PLOT_FITNESS = False
        if (int(config['visualization']['v_progression']) == 1):
            self.PLOT_FITNESS = True
            self.plotter = da.Plotter()
        # plot tree structure of current individual being evaluated (for debugging)
        self.PLOT_TREE = False
        if (int(config['visualization']['v_tree']) == 1):
            """ Deprecated debug function """
            print(
                "Note: visualization of the tree structure was set to true, this is not functional in this version."
            )
            self.PLOT_TREE = False
Example #10
 def filterData(self):
     threshs = []
     feat1s = []
     feat2s = []
     logics = []
     numfilters = self.dialog.counter + 1
     for i in range(numfilters):
         threshs.append(self.dialog.threshold[i].text())
         feat1s.append(self.dialog.feature1[i].currentText())
         feat2s.append(self.dialog.feature2[i].currentText())
         logics.append(self.dialog.logic[i].currentText())
     for i in range(numfilters):
         try:
             threshs[i] = float(str(threshs[i]))
         except ValueError:
             threshs[i] = str(threshs[i])
         feat1s[i] = str(feat1s[i])
         feat2s[i] = str(feat2s[i])
         logics[i] = str(logics[i])
     descriptions = getDescriptions()
     for i in range(numfilters):
         if (feat2s[i] not in descriptions) \
                 or ((feat1s[i] not in descriptions) and (i == 0)):
             self.errorLabel.setText(
                 "One or more of the feature names were invalid.")
             self.dialog.close()
             self.dialog = None
             return
         if type(threshs[i]) is str:
             if threshs[i] == "":
                 self.errorLabel.setText(
                     "One or more threshold values were empty")
                 self.dialog.close()
                 self.dialog = None
                 return
     try:
         filterResult = da.filtering(
             feat1s[0], feat2s, logics, threshs,
             [feat1s[i] for i in range(1, numfilters)])
     except Exception:
         self.errorLabel.setText(
             "An error occurred while loading or analyzing the data.")
         self.dialog.close()
         self.dialog = None
         return
     if type(filterResult) == str:
         self.errorLabel.setText(filterResult)
         self.dialog.close()
         self.dialog = None
     else:
         self.mode_linear = False
         self.mode_poly = False
         self.sc.clear_figure()
         self.errorLabel.setText("")
         self.data = filterResult[1]
         self.updateDataDisplay()
         self.dialog.close()
         self.dialog = None
Example #11
def main():
    # clearing console screen and printing info
    clear()
    info()

    # getting info from data.csv
    expert_count = FileReader.get_expert_count()
    csv_data, line_count = FileReader.get_data_and_line_count()

    # deleting commentaries and non-criteria info from dictionaries
    criteria_count, criteria_dict = DataAnalysis.get_criteria_dict_and_criteria_count(
        csv_data)

    # splitting criteria into blocks and computing concordance per block
    blocks_dict = DataAnalysis.split_blocks(criteria_dict)
    concordance_by_block = {}
    for block in blocks_dict:
        sum_on_criteria_dict = DataAnalysis.get_sum_on_criteria_dict(
            blocks_dict[block])
        overall_average_mark = DataAnalysis.get_overall_average_mark(
            sum_on_criteria_dict, len(blocks_dict[block]))
        squared_difference_sum = DataAnalysis.get_squared_difference_sum(
            sum_on_criteria_dict, overall_average_mark)
        concordance_by_block[block] = float(
            DataAnalysis.get_concordance(squared_difference_sum, expert_count,
                                         len(blocks_dict[block])))

    # computing the Gaussian distribution for each criterion
    gaussian_distribution_for_criteria = {}
    mathematical_expectation = DataAnalysis.get_average_on_criteria(
        criteria_dict, expert_count)
    for criteria in criteria_dict:
        avg_squared_diff, dispersion = DataAnalysis.get_avg_squared_diff_and_dispersion(
            criteria_dict[criteria], mathematical_expectation[criteria],
            expert_count)

        gaussian_distribution_for_criteria[
            criteria] = DataAnalysis.get_gaussian_distribution(
                dispersion, avg_squared_diff,
                mathematical_expectation[criteria], criteria_dict[criteria])

    # putting data into output files
    FileReader.write_dict_to_csv(concordance_by_block,
                                 'concordance_output.csv')
    FileReader.write_dict_to_csv(gaussian_distribution_for_criteria,
                                 'gaussian_distribution_output.csv')

    print("Done. Check files.")
Example #12
def main() -> None:
    option = ' '
    while option != '0':
        print(''' Select the option:
        1 - Register
        2 - Report
        3 - Data Analysis
        0 - Exit ''')
        option = input('option: ').strip()
        if option == '0':
            continue
        elif option == '1':
            menuRegister.menu()
        elif option == '2':
            Report.menu()
        elif option == '3':
            DataAnalysis.menu()
        else:
            print('\33[1;31mInvalid option\33[m')
    print('Thanks')
Example #13
def buildMLModel(nameCSV):
    X_full = da.readCSV(nameCSV)
    beside_list = ['Pregnancies']
    y = X_full.Diabetes
    X = ca.null_to_NaN(X_full.drop(['Diabetes'], axis=1), beside_list)
    X_train, X_valid, y_train, y_valid = train_test_split(X,
                                                          y,
                                                          train_size=0.85,
                                                          test_size=0.15)

    my_model = XGBRegressor(n_estimators=1000, learning_rate=0.05)
    my_model.fit(X_train,
                 y_train,
                 early_stopping_rounds=5,
                 eval_set=[(X_valid, y_valid)],
                 verbose=False)
    predictions = my_model.predict(X_valid)
    print("Mean Absolute Error: " +
          str(mean_absolute_error(predictions, y_valid)))
Example #14
def write_pot_file(filename, derivative, potential, numofbins, DeltaR, ii, iii, index, offset=0, smoothing="yes", selection="yes"):

  print "WRITING POT FILE AT:", filename
  OUTPUT_FILE_POT = open(filename, 'w')
  # outputting the updated potential and forces to a new LAMMPS potential file 
  OUTPUT_FILE_POT.write("# POTENTIAL FOR type %d and type %d\n\nTABLE_%d.%d\nN %d\n\n" % (ii, iii, ii, iii, index)) 
  #OUTPUT_FILE_POT.write("# POTENTIAL WITH NAME %s with %d bins.\n\n\n\n\n" % (filename, numofbins))
  index = 0 
  
  filtered_derivative = np.append(derivative,[0])
  filtered_potential  = potential
  if smoothing=="yes":
    #filtered_derivative = dm.smooth_data(np.append(derivative,[0]))
    #workaround to improve smoothing
    potential[0] = potential[1]
    #filtered_potential  = dm.smooth_data(filtered_potential,window_len=7,window='hamming') #Temporary 
    filtered_potential  = dm.smooth_data(filtered_potential) 

    # add thresholding to avoid smoothing of charged particles at low distances
    #for i in xrange(0, len(filtered_potential)):
    #  if np.abs(filtered_potential[i]) > 10.0:
    #    blend_factor = np.log(10.0)/np.log(np.abs(filtered_potential[i]))
    #    filtered_potential[i] = filtered_potential[i]*blend_factor + potential[i]*(1.0-blend_factor)
    filtered_derivative = da.derivatives(np.arange(offset, offset+(numofbins+1)*DeltaR, DeltaR),filtered_potential)
 
  print "LEN DERIVATIVE", len(filtered_derivative), len(filtered_potential), "smoothing: ", smoothing
  
  for i in xrange(0, numofbins+1):
    #          if i>340:
    #            print i*DeltaR, derivative[ii][iii][i], len(dy)
    if selection=="yes" and len(derivative) > i:
      if abs(derivative[i]) > 0:
        #              print i, index, i*DeltaR, len(derivative[ii][iii]), numofbins, len(filtered_potential), len(filtered_derivative)
        index += 1
        OUTPUT_FILE_POT.write("%d\t%f\t%f\t%f   \n" % (index, i*DeltaR+offset, filtered_potential[i], filtered_derivative[i]*-1))
    if selection=="no" and i < numofbins:
      index += 1
      OUTPUT_FILE_POT.write("%d\t%f\t%f\t%f   \n" % (index, i*DeltaR+offset, filtered_potential[i], filtered_derivative[i]*-1))
  
  OUTPUT_FILE_POT.close()
Example #15
	def __init__(self, config, dir):
		print(config.items())
		
		self.start_time = datetime.datetime.now()
		self.time = datetime.datetime.now()
		self.initialize_parameters_from_config_file(dir,config)
		self.fitnessData = da.FitnessData() # stores data of the progression

		# TODO take from configuration file
		self.EVALUATION_STEPS = 10000
		self.TOTAL_EVALUATIONS = 50000
		self.SAVEDATA = True

		# Initializing modules
		self.moduleList = get_module_list() # stores which module types to select from. This list is mutated using the L-System


		if self.load_best:
			print("Loading best")
			individual = pickle.load(open(self.SAVE_FILE_DIRECTORY + self.BEST_INDIVIDUAL_FILE,"rb"))
			for i in range(20):
				evaluate(individual,HEADLESS = False,INTERVAL =2 + i)
Example #16
 def plotLinearRegression(self):
     x = str(self.dialog.featureX.currentText())
     y = str(self.dialog.featureY.currentText())
     descriptions = getDescriptions()
     if (x not in descriptions) or (y not in descriptions):
         self.errorLabel.setText("One or more feature names were invalid")
         self.dialog.close()
         self.dialog = None
         return
     try:
         self.coefs = da.linearRegression(x, y)
     except Exception:
         self.errorLabel.setText(
             "An error occurred while loading or analyzing the data.")
         self.dialog.close()
         self.dialog = None
         return
     self.checkyint = self.dialog.yIntercept.isChecked()
     self.checkslope = self.dialog.slopeCheck.isChecked()
     self.checkrsquare = self.dialog.rSquared.isChecked()
     if (type(self.coefs) == str):
         self.errorLabel.setText(self.coefs)
         self.dialog.close()
         self.dialog = None
     else:
         self.mode_linear = True
         self.mode_poly = False
         self.data = self.coefs[0]
         self.errorLabel.setText("")
         self.sc.update_figure(self.coefs,
                               x,
                               y,
                               Linear=True,
                               yint=self.dialog.yIntercept.isChecked(),
                               slope=self.dialog.slopeCheck.isChecked(),
                               rsquare=self.dialog.rSquared.isChecked())
         self.updateDataDisplay()
         self.dialog.close()
         self.dialog = None
Example #17
    def __init__(self, filename, parent=None):
        super(MapDisplay, self).__init__(parent)
        print('Loading Data...')
        self.map = DataAnalysis.map(filename)
        print('Total {} nodes, {} ways.'.format(len(self.map.cross_list.nodes),
                                                len(self.map.ways)))
        self.size_x = 680
        self.size_y = 830
        self.show_path = False
        self.zoom = [0, 0]  # zoom state before the mouse scroll (previous) and after it (current)
        self.max_zoom = 50
        self.zoom_ratio = 1.1
        self.mouse = QPoint(0, 0)  # mouse position when an event fires (relative to the window's top-left corner)
        self.top_left = node('5', lat_min, lon_min)  # coordinates of the image's top-left corner
        self.top_left.cartesian_coordinate(self.map.cross_list.origin)
        self.is_zoom = 0  # zoom event flag
        self.is_press = 0  # mouse-press event flag
        self.before_drag = QPoint(0, 0)
        self.road_list = None
        self.min_distance = None
        self.intro = ""

        self.initUI()
Example #18
File: main.py Project: 1610337/PML
def get_filtered_data():

    df, returnStr = DataAnalysis.get_filtered_data()

    print(df.head())
    returnStr += str(df.head())

    # create a new feature
    print(df.head())
    df['CalcFeature'] = df['petal_length'] * df['petal_width']
    df = df.drop(['petal_length', 'petal_width'], axis=1)
    print(df.head())

    # get a dataframe without the species column
    df_feat = df.drop(columns=['species'])

    # split dataset into training and testing sets
    X = df_feat

    y = df['species']
    # random_state = seed for random values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

    return X_train, X_test, y_train, y_test, returnStr
Example #19
def MainPage(root, photo):
    root.title('Consumer Complaint Analysis - Main Page')
    MainPageFrame = Frame(root)
    MainPageFrame.pack(side=LEFT)

    # Side Image for MainPage
    ImageFrame = Frame(root)
    ImageFrame.pack(side=RIGHT)
    ImageLabel = Label(ImageFrame, image=photo)
    ImageLabel.pack(side=TOP)

    # Functions
    # Fetch/Update Data Button Function
    ###################################Fetch Data Starts####################################################
    def FetchData(root, ImageFrame, ImageLabel, photo, MainPageFrame,
                  FetchDataButton, ViewLogButton):
        #Start the Fetching Process
        InitialParameters = Initialize.getParam()
        LoadFileName = InitialParameters[15]

        #Disable the Button and destroy the Image
        FetchDataButton.config(state=DISABLED)
        ViewLogButton.config(state=DISABLED)
        ImageLabel.destroy()

        #Create Top and Bottom Frame
        TopFetchFrame = Frame(ImageFrame)
        TopFetchFrame.pack(side=TOP)
        BottopFetchFrame = Frame(ImageFrame)
        BottopFetchFrame.pack(side=BOTTOM)

        #Label to Display Log with Scroll Bar

        #Function for Scroll Bar
        def ScrollLogFunc(event):
            FetchCanvas.configure(scrollregion=FetchCanvas.bbox("all"),
                                  height=370,
                                  width=700)

        #canvas to Implement Scroll Bar for Log
        FetchCanvas = Canvas(TopFetchFrame)
        CanvasFrame = Frame(FetchCanvas)
        ScrollLogY = Scrollbar(TopFetchFrame,
                               orient="vertical",
                               command=FetchCanvas.yview)
        FetchCanvas.configure(yscrollcommand=ScrollLogY.set)
        FetchCanvas.pack(side=LEFT)
        ScrollLogY.pack(side=RIGHT, fill=Y)
        FetchCanvas.create_window((0, 0), window=CanvasFrame, anchor=NW)

        #Label to display the Log Details
        LogContentDisplay = Label(CanvasFrame,
                                  text='Loading Log Details . . ..',
                                  anchor=NW,
                                  justify=LEFT,
                                  wraplength=650)
        CanvasFrame.bind("<Configure>", ScrollLogFunc)
        LogContentDisplay.pack()

        #Function to Fetch Log Details from Log File - continuously
        def DisplayLogContent():
            #global LogValue
            CurrentLogValue = ''
            InitialParameters = Initialize.getParam()
            LogFileName = InitialParameters[14]
            try:
                LogFile = open(LogFileName, 'r')
            except Exception as e:
                #print('Log File Error : ',e)
                CurrentLogValue = 'Unable to Display Log. Please Wait . . .'
            else:
                CurrentLogValue = LogFile.read()
            LogContentDisplay.config(text=CurrentLogValue)
            LogContentDisplay.after(200, DisplayLogContent)

        #run the Fetch Log function
        DisplayLogContent()

        #Clear the Fetch Data Screen and revert to Default Main Page
        def DisplayLogClear(root, photo, ImageFrame, MainPageFrame):
            ImageFrame.destroy()
            MainPageFrame.destroy()
            MainPage(root, photo)

        #Button to Clear the Screen and revert back
        BackButton = ttk.Button(BottopFetchFrame,
                                text='Back',
                                width=20,
                                command=lambda: DisplayLogClear(
                                    root, photo, ImageFrame, MainPageFrame))
        BackButton.pack(side=LEFT, padx=50)

        #Run the Update Process using a thread

        if os.path.isfile(LoadFileName):
            popupmsg('Update Already in Progress !!!')
        else:
            Choice = messagebox.askquestion(
                "Update",
                "Are You sure you want to update the Main DB Tables?",
                icon='warning')
            if Choice == 'yes':
                threading.Thread(target=FD.main).start()
            else:
                DisplayLogClear(root, photo, ImageFrame, MainPageFrame)

    ###################################Fetch Data Ended####################################################

    ###################################View Log Starts####################################################
    def ViewLog(root, ImageFrame, ImageLabel, photo, MainPageFrame,
                ViewLogButton, FetchDataButton):
        #Disable the Button and destroy the Image
        ViewLogButton.config(state=DISABLED)
        FetchDataButton.config(state=DISABLED)
        ImageLabel.destroy()

        #Create Top and Bottom Frame
        TopViewFrame = Frame(ImageFrame)
        TopViewFrame.pack(side=TOP)
        BottopViewFrame = Frame(ImageFrame)
        BottopViewFrame.pack(side=BOTTOM)

        #Label to Display Log with Scroll Bar

        #Function for Scroll Bar
        def ScrollLogFunc(event):
            ViewCanvas.configure(scrollregion=ViewCanvas.bbox("all"),
                                 height=370,
                                 width=800)

        #canvas to Implement Scroll Bar for Log
        ViewCanvas = Canvas(TopViewFrame)
        CanvasFrame = Frame(ViewCanvas)
        ScrollLogY = Scrollbar(TopViewFrame,
                               orient="vertical",
                               command=ViewCanvas.yview)
        ViewCanvas.configure(yscrollcommand=ScrollLogY.set)
        ViewCanvas.pack(side=LEFT)
        ScrollLogY.pack(side=RIGHT, fill=Y)
        ViewCanvas.create_window((0, 0), window=CanvasFrame, anchor=NW)

        #Label to display the Log Details
        ViewLogDisplay = Label(CanvasFrame,
                               text='Please Select a Log Category.',
                               anchor=NW,
                               justify=LEFT,
                               wraplength=700)
        CanvasFrame.bind("<Configure>", ScrollLogFunc)
        ViewLogDisplay.pack(side=BOTTOM)

        #DropDown Button For Archive Log
        var = StringVar()
        var.set('')
        InitialParameters = Initialize.getParam()
        LogPath = InitialParameters[19]
        #LogPath=r'C:\Users\Priyank\Desktop\MIS\Scripting Languages\DataAnalyst-Project(Python)\Coding\Logs'
        LogList = []
        for filename in os.listdir(LogPath):
            #if filename.split('.')[-1]=='log' and filename.split('_')[0]=='DataUpdateLog':
            LogList.append(filename)

        def func(value):
            LogFile = LogPath + '\\' + str(value)
            Log = open(LogFile, 'r')
            LogValue = Log.read()
            ViewLogDisplay.config(text=LogValue)
            ChoiceLabel.config(text='Archived Process Logs -')

        ChoiceLabel = Label(CanvasFrame, text='LOGS !!!')
        ChoiceLabel.pack(side=LEFT)
        ChoiceDropDown = OptionMenu(CanvasFrame, var, *LogList, command=func)
        ChoiceDropDown.pack(side=RIGHT)

        #Function to Display the Logs based on selection
        #APi and Source file info from Log table
        def APILog(ChoiceLabel):
            ChoiceLabel.config(text='Source File and API Logs -')
            var.set('')
            APLDataFrame = FirebirdDB.DisplayAPILog()
            APLDataFrame['FileLen'] = APLDataFrame.FileName.map(len)
            MaxLength = 0
            for i in range(0, len(APLDataFrame)):
                if APLDataFrame.loc[i, 'FileLen'] > MaxLength:
                    MaxLength = APLDataFrame.loc[i, 'FileLen']

            for i in range(0, len(APLDataFrame)):
                APLDataFrame.loc[i, 'FileName'] = str(
                    APLDataFrame.loc[i, 'FileName']).ljust(MaxLength, '_')
                m, s = divmod(APLDataFrame.loc[i, 'Duration'], 60)
                h, m = divmod(m, 60)
                DurationTime = str(int(h)).rjust(2, '0') + ':' + str(
                    int(m)).rjust(2, '0') + ':' + str(int(s)).rjust(2, '0')
                APLDataFrame.loc[i, 'Duration'] = DurationTime
                #print(APLDataFrame.loc[i,'LoadStartDTTM'])
                APLDataFrame.loc[
                    i, 'LoadStartDTTM'] = datetime.datetime.strptime(
                        APLDataFrame.loc[i, 'LoadStartDTTM'],
                        "%Y-%m-%d %H:%M:%S.%f").strftime("%d %b %y %H:%M:%S")
                APLDataFrame.loc[i, 'LoadStartDTTM'] = str(
                    APLDataFrame.loc[i, 'LoadStartDTTM']).ljust(30,
                                                                ' ').upper()
                #print(datetime.datetime.strptime(APLDataFrame.loc[i,'LoadStartDTTM']),"%d/%m/%Y").strftime("%d%b%y"))
                APLDataFrame.loc[i, 'LoadIndex'] = str(
                    int(APLDataFrame.loc[i, 'LoadIndex']))
                APLDataFrame.loc[i, 'LoadOrder'] = str(
                    int(APLDataFrame.loc[i, 'LoadOrder']))
                APLDataFrame.loc[i, 'RecordCount'] = str(
                    int(APLDataFrame.loc[i, 'RecordCount']))

            ViewLogDisplay.config(
                text=
                '--------------------------------------------------------------------------------'
                '-----------------------------------------------\n'
                'Index\tOrder\t' + 'FileName'.ljust(MaxLength, '_') +
                '\tCount\tStartTime\t\tDuration\n' +
                '--------------------------------------------------------------------------------'
                '-----------------------------------------------\n' +
                APLDataFrame.to_csv(
                    sep='\t',
                    index=False,
                    header=False,
                    columns=[
                        'LoadIndex', 'LoadOrder', 'FileName', 'RecordCount',
                        'LoadStartDTTM', 'Duration'
                    ]) +
                '--------------------------------------------------------------------------------'
                '-----------------------------------------------')

        #Load Step info from Main Log
        def LoadStep(ChoiceLabel):
            ChoiceLabel.config(text='Load Step Logs -')
            var.set('')
            LoadStepDataFrame = FirebirdDB.DisplayLoadStepLog()
            LoadStepDataFrame['StatusLen'] = LoadStepDataFrame.Status.map(len)
            MaxStatus = 0

            for i in range(0, len(LoadStepDataFrame)):
                if LoadStepDataFrame.loc[i, 'StatusLen'] > MaxStatus:
                    MaxStatus = LoadStepDataFrame.loc[i, 'StatusLen']

            for i in range(0, len(LoadStepDataFrame)):
                LoadStepDataFrame.loc[i, 'Status'] = str(
                    LoadStepDataFrame.loc[i, 'Status']).ljust(MaxStatus, '_')
                LoadStepDataFrame.loc[i, 'LoadIndex'] = str(
                    int(LoadStepDataFrame.loc[i, 'LoadIndex']))
                LoadStepDataFrame.loc[i, 'LoadOrder'] = str(
                    int(LoadStepDataFrame.loc[i, 'LoadOrder']))
                LoadStepDataFrame.loc[i, 'RecordCount'] = str(
                    int(LoadStepDataFrame.loc[i, 'RecordCount']))
                LoadStepDataFrame.loc[
                    i, 'LoadStartDTTM'] = datetime.datetime.strptime(
                        LoadStepDataFrame.loc[i, 'LoadStartDTTM'],
                        "%Y-%m-%d %H:%M:%S.%f").strftime("%d %b %y %H:%M:%S")
                LoadStepDataFrame.loc[i, 'LoadStartDTTM'] = str(
                    LoadStepDataFrame.loc[i,
                                          'LoadStartDTTM']).ljust(30,
                                                                  ' ').upper()
                m, s = divmod(LoadStepDataFrame.loc[i, 'Duration'], 60)
                h, m = divmod(m, 60)
                DurationTime = str(int(h)).rjust(2, '0') + ':' + str(
                    int(m)).rjust(2, '0') + ':' + str(int(s)).rjust(2, '0')
                LoadStepDataFrame.loc[i, 'Duration'] = DurationTime

            ViewLogDisplay.config(
                text=
                '--------------------------------------------------------------------------------'
                '----------------------------------------------------------\n'
                'Index\tOrder\t' + 'Status'.ljust(MaxStatus, '_') +
                '\tCount\tStartTime\t\tDuration\n' +
                '--------------------------------------------------------------------------------'
                '----------------------------------------------------------\n'
                + LoadStepDataFrame.to_csv(
                    sep='\t',
                    index=False,
                    header=False,
                    columns=[
                        'LoadIndex', 'LoadOrder', 'Status', 'RecordCount',
                        'LoadStartDTTM', 'Duration'
                    ]) +
                '--------------------------------------------------------------------------------'
                '----------------------------------------------------------')

        #Display Log details from Archived files/Rejects
        def ProcessLog():
            ChoiceLabel.config(text='Archived Process Logs -')
            ViewLogDisplay.config(text='Please Select an Archive File !!!')
            var.set('')

        #Clear the View Log Screen and revert to Default Main Page
        def DisplayLogClear(root, photo, ImageFrame, MainPageFrame):
            ImageFrame.destroy()
            MainPageFrame.destroy()
            MainPage(root, photo)

        #Buttons to choose log type
        APILogButton = ttk.Button(BottopViewFrame,
                                  text='API Logs',
                                  width=20,
                                  command=lambda: APILog(ChoiceLabel))
        LoadStepButton = ttk.Button(BottopViewFrame,
                                    text='Load Step Logs',
                                    width=20,
                                    command=lambda: LoadStep(ChoiceLabel))
        ProcessLogButton = ttk.Button(BottopViewFrame,
                                      text='Process Logs/Rejects',
                                      width=20,
                                      command=ProcessLog)
        BackButton = ttk.Button(BottopViewFrame,
                                text='Back',
                                width=20,
                                command=lambda: DisplayLogClear(
                                    root, photo, ImageFrame, MainPageFrame))

        #pack Buttons
        APILogButton.pack(side=LEFT, padx=20)
        LoadStepButton.pack(side=LEFT, padx=20)
        ProcessLogButton.pack(side=LEFT, padx=20)
        BackButton.pack(side=LEFT, padx=20)

    ###################################View Log Ended####################################################

    # Create Buttons to Perform Tasks
    FetchDataButton = ttk.Button(
        MainPageFrame,
        text='Fetch/Update Database',
        width=40,
        command=lambda: FetchData(root, ImageFrame, ImageLabel, photo,
                                  MainPageFrame, FetchDataButton, ViewLogButton
                                  ))
    ViewLogButton = ttk.Button(
        MainPageFrame,
        text='View Log History',
        width=40,
        command=lambda: ViewLog(root, ImageFrame, ImageLabel, photo,
                                MainPageFrame, ViewLogButton, FetchDataButton))
    DataAnalystButton = ttk.Button(MainPageFrame,
                                   text='Data Analyst(Consumer Complaint)',
                                   command=lambda: DataAnalysis.AnalystMain(
                                       root, ImageFrame, MainPageFrame),
                                   width=40)
    MainExitButton = ttk.Button(MainPageFrame,
                                text='Exit',
                                width=40,
                                command=root.quit)

    SpaceLabel0 = Label(MainPageFrame, text='', height=3)
    SpaceLabel1 = Label(MainPageFrame, text='', height=3)
    SpaceLabel2 = Label(MainPageFrame, text='', height=3)
    SpaceLabel3 = Label(MainPageFrame, text='', height=3)
    SpaceLabel4 = Label(MainPageFrame, text='', height=3)

    #Clock
    clock = Label(MainPageFrame, font=CLOCK_FONT, anchor=W)
    clock.pack(side=TOP, fill=BOTH, expand=1)

    def tick():
        global TIMER
        #CurrTime = time.strftime('%H:%M:%S')
        CurrTime = time.strftime('%d %b %Y %X')
        if CurrTime != TIMER:
            TIMER = CurrTime
            clock.config(text=CurrTime)
        clock.after(200, tick)

    #run Clock
    tick()

    # Display the Buttons on Main Page
    SpaceLabel0.pack()
    FetchDataButton.pack()
    SpaceLabel1.pack()
    ViewLogButton.pack()
    SpaceLabel2.pack()
    DataAnalystButton.pack()
    SpaceLabel3.pack()
    MainExitButton.pack()
    SpaceLabel4.pack()

    # Status Bar Functions
    def NormStatus(event):
        status.config(text='Consumer Complaint Analysis', anchor=NE)

    def FetchStatus(event):
        status.config(
            text=
            'Fetch Consumer Complaint Data from http://data.consumerfinance.gov/ site and Update the Database Tables.',
            anchor=NW,
            justify=LEFT,
            wraplength=280)

    def ViewLogStatus(event):
        status.config(
            text=
            'View Log History of all the All Database updates from http://data.consumerfinance.gov/ site.',
            anchor=NW,
            justify=LEFT,
            wraplength=280)

    def DataAnalystStatus(event):
        status.config(
            text=
            'Click to Analyse in Detail the Consumer Complaint Data, fetch Reports and Visualize Data.',
            anchor=NW,
            justify=LEFT,
            wraplength=280)

    def ExitStatus(event):
        status.config(text='Exit the Consumer Complaint Analyst Program',
                      anchor=NW,
                      justify=LEFT,
                      wraplength=280)

    #status Bar Bottom
    status = Label(MainPageFrame,
                   text='Consumer Complaint Analysis',
                   bd=1,
                   relief=SUNKEN,
                   anchor=NE,
                   height=4,
                   width=40)
    status.pack(side=BOTTOM, fill=X)

    #Binding Status Functions
    FetchDataButton.bind('<Enter>', FetchStatus)
    FetchDataButton.bind('<Leave>', NormStatus)
    ViewLogButton.bind('<Enter>', ViewLogStatus)
    ViewLogButton.bind('<Leave>', NormStatus)
    DataAnalystButton.bind('<Enter>', DataAnalystStatus)
    DataAnalystButton.bind('<Leave>', NormStatus)
    MainExitButton.bind('<Enter>', ExitStatus)
    MainExitButton.bind('<Leave>', NormStatus)
Example #20
detector_dist         = 3289.95            # pixels
true_center           = [1020.67, 1024.61] # [row, column] of detector image center in pixels (shifted by 1 for python index)
e_rng                 = [-0.012, 0.012]    # elastic strain range
p_rng                 = [-0.024, 0.024]    # plastic strain range
t_rng                 = [-0.036, 0.036]    # total strain range
E, G, v               =  71.7, 26.9, 0.33  # elastic modulus (GPa), shear modulus (GPa), Poisson's ratio

ring_name             = 'al_311'
radius                = 718                # ring radius in pixels
dr                    = 30                 # half of ring width in pixels
min_amp               = 25                 # minimum acceptable peak amplitude
vec_frac              = 0.25               # fraction of peaks that must be acceptable

sample    = DA.Specimen(specimen_name, data_dir, out_dir,
                        step_names, dic_files, dark_dirs, 
                        init_dirs, dic_center, 
                        x_range,x_num, y_range, y_num,
                        detector_dist, true_center, 
                        e_rng, p_rng, t_rng, E, G, v)   

#%% Lasso fitting for each ring
# Specify image data
load_step_list = [3]
img_num_list = range(23,24)

num_theta = 2400
dtheta = 2*np.pi/num_theta
num_var = 100

var_domain = np.linspace((dtheta),(np.pi/16),num_var)**2

basis_path = os.path.join('basis_radial_sum','gaus_basis_shift_')
Example #21
 def __init__(self):
     self.dbController = DBController.DBController()
     self.dataAnalysis = DataAnalysis.DataAnalysis()
Example #22
 Title: Analyze user patterns using Data(instacart-market-basket-analysis/Kaggle)
'''
# install Library List
## pip3 install pandas
## pip3 install numpy
## pip3 install matplotlib

# Import Library
## Data Preprocessing Classes
import Preprocessing
## Classes related to analysis and visualization
import DataAnalysis

# Class declaration
prepro = Preprocessing.Preprocessing()
analysis = DataAnalysis.DataAnalysis()

# Running Process
while True:
    print('Run Data PreProcessing ▶ 1')
    print('Run Analysis and Visualization ▶ 2')
    choice = int(input('Choose the Job:'))

    if choice == 1:
        # Run Data Preprocessing
        ## num: PreProcessing(-1), PreProcessing & Save(0), Read Sample Data(1)
        ## user: Specify user counts when saving
        n = int(
            input(
                'PreProcessing(-1), PreProcessing & Save(0), Read Sample Data(1):'
            ))
Example #23
#Set path for libraries on server
#import sys
#sys.path.insert(0, "/home/student/anaconda3/lib/python3.4/site-packages")

import MakeLogDataDB
import makeSurveyDB
import makeTimeTableDB
import glob, os
import wicount
import db
import DataAnalysis

#Connect to the database
con = db.get_connection()
c = con.cursor()
# Create all the database tables
wicount.SetUpDatabase()
MakeLogDataDB.main()
makeSurveyDB.main()
makeTimeTableDB.main()

#Run the analytics algorithm
DataAnalysis.main()

#Close the database connection
con.close()
Example #24
import CoreActions as ca
import DataAnalysis as da
import LearningModel as lm
import PrepareDataset as pds

if __name__ == '__main__':
    nameXLS = 'Diabetes.xls'
    nameCSV = 'csvData.csv'

    ca.convertXLSToCSV(nameCSV, nameXLS)

    da.describeData(nameCSV)
    #da.detectNullVal(nameCSV)
    pds.buildMLModel(nameCSV)

    #lm.buildMLModel(nameCSV)
Example #25
ring_name = 'al_111'
left = [372, 432]
right = [1623, 1683]
top = []
bottom = []
lambda_sel = 2.0
#%%
ring_name = 'al_311'
left = [277, 337]
right = [1713, 1773]
top = [274, 334]
bottom = [1709, 1769]
lambda_sel = 2.0
#%%
sample = DataAnalysis.Specimen(specimen_name, data_dir, out_dir, x_range,
                               x_num, z_range, z_num, step_names, dark_dirs,
                               init_dirs)
ring = DataAnalysis.Ring(ring_name, sample, left, right, top, bottom,
                         lambda_sel)

if specimen_name == 'ti64_notched' or specimen_name == 'ti64_plain':
    xa = np.linspace(sample.x_range[1],
                     sample.x_range[0],
                     num=x_num,
                     endpoint=True)
    za = np.linspace(sample.z_range[1],
                     sample.z_range[0],
                     num=z_num,
                     endpoint=True)
    z2d, x2d = np.meshgrid(za, xa)
if specimen_name == 'al7075_plain':
Example #26
File: main.py Project: mimocha/kaggle
"""
	Kaggle Titanic Competition -- 2020-02-02
	kaggle/mimocha
	github/mimocha
	Chawit Leosrisook
"""
""" =========================================================================================== """
""" ======================================= MAIN SCRIPT ======================================= """
""" =========================================================================================== """

import DataAnalysis
import DataHandler

filename = './data/train.csv'
rawData = DataHandler.ReadCSV(filename)

# Preliminary Data Analysis
DataAnalysis.Preliminary(rawData)
Example #27
t = numpy.linspace(0, sim_time, nsamps)
freqs = [0.1, 0.5, 1, 4]
x = 0
for i in range(len(freqs)):
  x += numpy.cos(2*math.pi*freqs[i]*t)
time_dom = fig.add_subplot(232)

fig.add_subplot(233)
pyplot.plot(t, x)
# pyplot.ylim([-5,5])
pyplot.title('Filter Input - Time Domain')
pyplot.grid(True)

f = h5py.File("test.hdf5", "w")

dset = f.create_dataset("test", data=x)
f.flush()
f.close()

da = DataAnalysis.data_analysis()
da.load_hdf5("test", "test", dataset_rename="test")
da.high_pass_filter(samp_rate, order=7)


fig.add_subplot(236)
pyplot.plot(t, da.data)
pyplot.title('Filter Output - Time Domain')
pyplot.grid(True)

pyplot.show()
Example #28
x1d, z1d              = x2d.flatten(), z2d.flatten()
#%%    read peak diameters if they have been fit, if not fit here

orient                = 'h'
try:
    x, z, diams = [], [], []
    for i_step in range(sample.n_load_step):
        txt_data          = DataReader.read_data_from_text(ring.out_dir+sample.step_names[i_step]+'_diams_'+orient+'.txt')
        x.append(txt_data[:, 0])
        z.append(txt_data[:, 1])
        diams.append(txt_data[:, 2])
except Exception:
    l_centers, l_errs     = np.zeros((sample.n_load_step, sample.n_data_pt)), np.zeros((sample.n_load_step, sample.n_data_pt))
    u_centers, u_errs     = np.zeros((sample.n_load_step, sample.n_data_pt)), np.zeros((sample.n_load_step, sample.n_data_pt))
    diams                 = np.zeros((sample.n_load_step, sample.n_data_pt))
    for i_step in range(sample.n_load_step):         
        l_centers[i_step,:], l_errs[i_step,:], u_centers[i_step,:], u_errs[i_step,:], diams[i_step,:] = DataAnalysis.write_scan_diameters(sample, ring, x1d, z1d, i_step, orient)

#%% 

#     total variation filtering 

fits, coords          = [], []
for i_step in range(sample.n_load_step):
    s_fits, s_coords      = [], []
    xvals                 = np.unique(x[i_step])
    for xval in xvals:
        x_col                 = x[i_step][x[i_step]==xval]
        z_col                 = z[i_step][x[i_step]==xval]
        diam_col              = diams[i_step][x[i_step]==xval]
        fit                   = DataAnalysis.total_variation(diam_col, ring.lambda_sel) 
        path                  = ring.out_dir+sample.step_names[i_step]+'_x_'+str(xval)+orient+'.tiff'
Example #29
 def testAge(self):
     data = open('data_test.json').read()
     data = json.loads(data)
     result = DataAnalysis.getAgeGross(data)
     self.assertEqual(result[0][0], 19)
     self.assertEqual(result[1][0], 22)
Example #31
detector_dist         = 3293.09         # pixels
true_center           = [1027, 1027]    # [row, column] of center in pixels
e_rng                 = [-0.012, 0.012]
p_rng                 = [-0.024, 0.024]
t_rng                 = [-0.036, 0.036]

ring_name             = 'al_311'
left                  = [278, 338]
right                 = [1713, 1773]
top                   = [274, 334]
bottom                = [1709, 1769]
lambda_h              = 2.0
lambda_v              = 0.8
#%%
#    create sample and ring objects
sample                = DA.Specimen(specimen_name, data_dir, out_dir, x_range, x_num, z_range, z_num, step_names, dark_dirs, init_dirs, detector_dist, true_center)
ring                  = DA.Ring(ring_name, sample, left, right, top, bottom, lambda_h, lambda_v)

# create appropriate coordinate arrays

if sample.name == 'ti64_notched' or specimen_name=='ti64_plain':
    xa                    = np.linspace(sample.x_range[1], sample.x_range[0], num=x_num, endpoint=True)
    za                    = np.linspace(sample.z_range[1], sample.z_range[0], num=z_num, endpoint=True)
    z2d, x2d              = np.meshgrid(za, xa)
if sample.name == 'al7075_plain':
    xa                    = np.linspace(x_range[1], x_range[0], num=x_num, endpoint=True)
    za                    = np.linspace(z_range[1], z_range[0], num=z_num, endpoint=True)
    x2d, z2d              = np.meshgrid(xa, za)
if sample.name == 'al7075_mlf':
    xa                    = np.linspace(x_range[0], x_range[1], num=x_num, endpoint=True)
    za                    = np.linspace(z_range[0], z_range[1], num=z_num, endpoint=True)
Example #32
File: testscript.py Project: cplab/LSCE
def main(argv=None):
    if argv is None:
        argv = sys.argv
    run_importer = True
    run_formatter = True
    run_analysis = True
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "h", ["help", "skip-importer", "skip-formatter", "skip-analysis"])
            print "got args"
        except getopt.error, msg:
            raise Usage(msg)
        for option, data in opts:
            if('-h' == option or '--help' == option):
                print "LSCE test script. Usage: \"python testscript.py [--skip-importer] [--skip-formatter] [--skip-analysis]" +\
                    " mat_source hdf5_dest" +\
                    "\"\n\nSupply the following arguments to run pipeline:\n\n\tmat_source: " +\
                    "The path to the raw .mat files to be imported.\n\thdf5_dest: the name to save hdf5 output file under" +\
                    "\n\nAvailable modes:\n\t--skip-importer: skip importation step. Formatter will still run using" +\
                    " mat_source as src directory." +\
                    "\n\t--skip-formatter: skip formatting step. Importer will use mat_source as usual. \n\t\t\t  Analysis will" +\
                    " use hdf5_dest if it exists." + \
                    "\n\t--skip-analysis: skip computation of analysis data. Formatter will still output to hdf5_dest. "
                return
            if('--skip-importer' == option):
                run_importer = False
            if('--skip-formatter' == option):
                run_formatter = False
            if('--skip-analysis' == option):
                run_analysis = False
        if(len(args) < 2):
            raise Usage("Insufficient arguments supplied.")
        else:
            print args.__repr__()
        print "Welcome to LSCE test script.\nThis script will perform a " + \
            "complete iteration of our pipeline, starting with the data importer."
        if(run_importer):
            print "Importing data from directory "+args[0]
            Importer.loadFromMat(args[0])
        else:
            print "Skipped importing data."
        if(run_formatter):
            print "Formatting data as hdf5 in "+args[1]+".hdf5"
            DataFormatter.formatData(args[0], args[1])
        else:
            print "Skipped formatting data."
        os.system("PAUSE")
        testing = None
        raw_data = None
        if(run_analysis):
            dtool = DataAnalysis.data_analysis()
            dtool.load_hdf5(args[1], dataset_name="Electrode_12_master", group_name="raw_data")
            dtool.sampling_rate = 1000
            testing = dtool.high_demo_filter(20)
            raw_data = dtool.f["raw_data"]["Electrode_12_master"]
        else:
            print "Skipped data analysis.\nPlaceholder groups " + \
                "\"/data_analysis/demo_filter_results\" and \"/raw_data/Electrode_12_master\" will be used."
            hdfile = h5py.File(args[1]+".hdf5", "r+")
            if("data_analysis" not in hdfile or "demo_filter_results" not in hdfile["data_analysis"]):
                print "Skipping graphs..."
                return
            testing = hdfile["data_analysis"]["demo_filter_results"]
            raw_data = hdfile["raw_data"]["Electrode_12_master"]
        os.system("PAUSE")
        plt.subplot(2, 1, 1)
        plt.plot(testing)
        plt.subplot(2, 1, 2)
        plt.plot(raw_data)
        plt.show()
        if(run_analysis):
            dtool.close()
Example #33
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
from ***.data_analysis import DataAnalysis
import *** 

import sys 

reload(sys)
sys.setdefaultencoding('utf8')

filename = 'data.txt'

analysis = DataAnalysis(filename)

I = analysis.plotXY()

subseq = analysis.findMaxSubSequence()

if subseq == [1, 2, 8, 9, 10, 11, 13]:
    print('PASS')
else:
    print('FAILURE')

sys.stdout.write(str(subseq))

Example #34
    SamplingData = Random_Data['GenericData']
    Board = gen.Generic_Generator(Queue=Data_Queue,
                                  binSize=50,
                                  Data=SamplingData)
    Board.start()

while len(Feature) < 5 * TaskSetting['Initiation']:
    if not Data_Queue.empty():
        Sample = Data_Queue.get()
        if not EEG_Recording.any():
            EEG_Recording = Sample
        else:
            EEG_Recording = np.append(EEG_Recording, Sample, axis=0)

        # Common Average Reference
        Sample[:, range(1, 9)] = Process.Common_Average_Reference(
            Sample[:, range(1, 9)])

        # Feature Extraction
        if ALPHA:
            Feature = np.append(
                Feature, Process.AlphaDifference(Sample, ([5, 6], [7, 8]),
                                                 250))
        else:
            Feature = np.append(
                Feature, Process.PowerExtraction(Sample[:, 3], [8, 12], 250))

FIFO.Rewrite(Trigger_Log, "Calibration On")
Trigger = np.array([[len(Feature), CALIBRATION_START]])

print "Calibration Stage 1"
Current_Index = len(Feature)
Example #35
# distribution of GDP per person. If the input is finish, then it will plot the
# histogram and boxplot from 2007 to 2012.

import pandas as pd
from DataAnalysis import *
from Methods import *
from InvalidInputException import *
import sys

if __name__ == "__main__":
    countries = pd.read_csv("countries.csv")
    income = pd.read_excel("indicator gapminder gdp_per_capita_ppp.xlsx", index_col=0).T
    print(income.head())
    while True:
        try:
            year_string = input("Enter a year between 1800 and 2012 to display the distribution, or 'finish' to exit")
            if year_string.isdigit() and int(year_string) >= 1800 and int(year_string) <= 2012:
                year = int(year_string)
                displayIncome(year, income)
            elif year_string == "finish":
                for i in range(2007, 2013):
                    cur_data = merge_by_year(i, income, countries)
                    cur_data_analysis = DataAnalysis(i)
                    cur_data_analysis.histogram(cur_data)
                    cur_data_analysis.boxplot(cur_data)
                break
            else:
                raise InvalidInputException()
        except KeyboardInterrupt:
            sys.exit()
Example #36
#da.__testInialization__()
#da.__printDataset__()


#------------ Simple Test 1 ------------
# *** Please refer to "testInialization" for the feature name(s)
#linearRegressionOutput = DA.linearRegression('Feature1', 'Feature2')


#filteringOutput = DA.filtering2("Feature5", "Feature5", ">=", 0)
#filteringOutput = DA.filtering("Feature5", ["Feature5"], [">="], [0], [])
#filteringOutput = DA.filtering("Feature5", ["Feature5", "Feature5"], [">=", "<="], [0, 5], ["AND"])
#filteringOutput = DA.filtering("Feature5", ["Feature5", "Feature5"], [">=", "<="], [0, 3], ["OR"])
#filteringOutput = DA.filtering("Feature5", ["Feature5", "Feature5", "Feature5"], [">=", "<=", "<="], [0, 5, 3], ["AND", "AND"])
#filteringOutput = DA.filtering("Feature5", ["Feature5", "Feature5", "Feature5","Feature5","Feature5","Feature5"], ["<=", "<=", "<=","<=","<=","<="], [10, 9, 8,7,6,5], ["AND", "AND","AND", "AND","AND"])
filteringOutput = DA.filtering("Feature2", ["Feature2"], ["Does Not Contain"], ["HI"], [])

#filteringOutput = DA.filtering("Feature1", ["Feature2","Feature2"], ["Contains","Does Not Contain"], ["hi","hi"], ["AND"])
#filteringOutput = DA.filtering("Feature1", ["Feature2","Feature2"], ["Contains","Does Not Contain"], ["hi","hi"], ["OR"])

# 1. output of linearRegression
#if type(linearRegressionOutput) == str:
#    print (linearRegressionOutput)
#print (linearRegressionOutput[0])
#print ('\nCoefficients:', linearRegressionOutput[1])
#print ('Y-intercept:', linearRegressionOutput[2])
#print ('R^2:', linearRegressionOutput[3])

# 2. output of filtering
if type(filteringOutput) == str:
    print (filteringOutput)
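A hedged completion of the success branch, assuming the tuple layout from Example #10, where the second element of the filtering result holds the filtered data:

else:
    # filteringOutput[1] is assumed to hold the filtered rows, as in Example #10
    print(filteringOutput[1])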