Example #1
    def validate_file_dimensions(cls, descriptors: [FileDescriptor],
                                 data_model: DataModel) -> bool:
        # Get list of paths of selected files
        if len(descriptors) > 0:

            # If precalibration file is in use, add that name to the list
            if data_model.get_precalibration_type(
            ) == Constants.CALIBRATION_FIXED_FILE:
                calibration_descriptor = \
                    RmFitsUtil.make_file_descriptor(data_model.get_precalibration_fixed_path())
                descriptors.append(calibration_descriptor)

            # Get binning and dimension of first to use as a reference
            assert len(descriptors) > 0
            reference_file: FileDescriptor = descriptors[0]
            reference_binning = reference_file.get_binning()
            reference_x_size = reference_file.get_x_dimension()
            reference_y_size = reference_file.get_y_dimension()

            # Check all files in the list against these specifications
            descriptor: FileDescriptor
            for descriptor in descriptors:
                if descriptor.get_binning() != reference_binning:
                    return False
                if descriptor.get_x_dimension() != reference_x_size:
                    return False
                if descriptor.get_y_dimension() != reference_y_size:
                    return False

        return True
Example #2
    def original_non_grouped_processing(self, selected_files: [FileDescriptor],
                                        data_model: DataModel,
                                        output_file: str, console: Console):
        console.push_level()
        console.message("Using single-file processing", +1)
        # We'll use the first file in the list as a sample for things like image size
        assert len(selected_files) > 0
        # Confirm that these are all dark frames, and can be combined (same binning and dimensions)
        if FileCombiner.all_compatible_sizes(selected_files):
            self.check_cancellation()
            if data_model.get_ignore_file_type() or FileCombiner.all_of_type(
                    selected_files, FileDescriptor.FILE_TYPE_DARK):
                # Get (most common) filter name in the set
                # Since these are darks, the filter is meaningless, but we need the value
                # for the shared "create file" routine
                filter_name = SharedUtils.most_common_filter_name(
                    selected_files)

                # Do the combination
                self.combine_files(selected_files, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                substituted_folder_name = SharedUtils.substitute_date_time_filter_in_string(
                    data_model.get_disposition_subfolder_name())
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    substituted_folder_name, selected_files, console)
            else:
                raise MasterMakerExceptions.NotAllDarkFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes
        console.message("Combining complete", 0)
        console.pop_level()
Example #3
 def testing(self):
     print("Running ITXLogFetcher.testing()")
     DataModel.testing()
     # logfetcher.testing3()
     self.bottom_panel.testing1()
     self.top_panel.testing2()
     print(len(self.top_panel.matchingfiles))
     print("FINISHED ITXLogFetcher.testing()")
Example #4
    def __init__(self,  files_to_copy):
        """Init Worker Thread Class."""
        Thread.__init__(self)
        self.files_to_copy = files_to_copy
        self.done = False
        self.abort = False

        (self.zipoption, self.zip_filename) = DataModel.get_zipconfig()
        self.log_destination_path = DataModel.get_logdestinationpath()
Example #5
    def process_one_group(self, data_model: DataModel,
                          descriptor_list: [FileDescriptor],
                          output_directory: str, combine_method: int,
                          disposition_folder_name, console: Console):
        """
        Process one group of files, output to the given directory
        Exceptions thrown:
            NotAllFlatFrames        The given files are not all flat frames
            IncompatibleSizes       The given files are not all the same dimensions

        :param data_model:                  Data model giving options for current run
        :param descriptor_list:             List of all the files in one group, for processing
        :param output_directory:            Path to directory to receive the output file
        :param combine_method:              Code saying how these files should be combined
        :param disposition_folder_name:     If files to be moved after processing, name of receiving folder
        :param console:                     Re-directable console output object
        """
        assert len(descriptor_list) > 0
        sample_file: FileDescriptor = descriptor_list[0]
        console.push_level()
        self.describe_group(data_model, len(descriptor_list), sample_file,
                            console)

        # Make up a file name for this group's output, into the given directory
        file_name = SharedUtils.get_file_name_portion(
            combine_method, sample_file, data_model.get_sigma_clip_threshold(),
            data_model.get_min_max_number_clipped_per_end())
        output_file = f"{output_directory}/{file_name}"

        # Confirm that these are all flat frames, and can be combined (same binning and dimensions)
        if self.all_compatible_sizes(descriptor_list):
            if data_model.get_ignore_file_type() \
                    or FileCombiner.all_of_type(descriptor_list, FileDescriptor.FILE_TYPE_FLAT):
                # Get (most common) filter name in the set
                # Get filter name to go in the output FITS metadata.
                # All the files should be the same filter, but in case there are stragglers,
                # get the most common filter from the set
                filter_name = SharedUtils.most_common_filter_name(
                    descriptor_list)

                # Do the combination
                self.combine_files(descriptor_list, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    disposition_folder_name, descriptor_list, console)
                self.check_cancellation()
            else:
                raise MasterMakerExceptions.NotAllFlatFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes

        console.pop_level()
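A hypothetical invocation matching the parameters documented above; the combiner instance, the descriptor list, the output path, and the Constants.COMBINE_MEDIAN method code are all illustrative assumptions, not part of the example:

    combiner = FileCombiner()   # assumed owning class and no-argument constructor
    combiner.process_one_group(data_model, group_descriptors,
                               output_directory="/tmp/masters",          # assumed path
                               combine_method=Constants.COMBINE_MEDIAN,  # assumed constant name
                               disposition_folder_name="originals",
                               console=console)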
Example #6
    def __init__(self, username, first_date, last_date):
        self.dm = DataModel(
            username
        )  #After integration, this will take person_id, not the other way around.
        self.person_id = self.dm.person_id
        self.first_date = first_date
        self.last_date = last_date

        self.cached_data = None
        self.update_cache()
Example #7
	def __init__(self, data = [], parent=None,  *args):
		QTableView.__init__(self, parent, *args)
		self.data_model          = DataModel(self)
		self.left_frozen_view    = None
		self.corner_frozen_view  = None
		self.top_frozen_view     = None
		self.update_data_model()

		self.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
		self.setVerticalScrollMode(  QAbstractItemView.ScrollPerPixel)
		self.enable_forzen_view(0, 1)
Example #8
def unzip_file(filename):
    print("unzip file - " + filename)
    if zipfile.is_zipfile(filename):
        print("0 checked if is zip")
        zfile = zipfile.ZipFile(filename)
        print("opened the zip file")
        print(DataModel.get_logdestinationpath())
        print("1 is zip file - " + DataModel.get_logdestinationpath())
        dest_dir = DataModel.get_logdestinationpath() + "\\unzipped\\"
        print("2 before " + dest_dir)
        zfile.extractall(dest_dir)
        print("3 after " + dest_dir)
Example #9
 def __init__(self,  datefrom, dateto):
     """Init Worker Thread Class."""
     Thread.__init__(self)
     self.logpaths = DataModel.get_logsourcepath()
     self.servername = DataModel.get_servername()
     self.datefrom = datefrom
     self.dateto = dateto
     self.done = False
     self.abort = False
     # This starts the thread running on creation, but you could
     # also make the GUI thread responsible for calling this
     self.start()
Example #10
def main():
    print("Loading data...")
    data_model = DataModel()
    input_size = len(data_model.top_words)
    output_size = len(data_model.get_genres())
   
#    simple_history = evaluate_model(create_simple_model(input_size, output_size), data_model, "Simple")
#    tanh_history = evaluate_model(create_one_layer_tanh_model(input_size, output_size), data_model, "One Layer tanh")

    hidden_sizes = [i*10 for i in range(7, 11)]
    for hidden_size in hidden_sizes:
        HIDDEN_SIZE = hidden_size
        sigmoid_history = evaluate_model(create_one_layer_sigmoid_model(input_size, output_size), data_model, "One Layer sigmoid")
Example #11
    def OnApply(self, event):

        # get the required info from the interface
        picked_dateto = DataModel.get_day(self.dateto)
        picked_datefrom = DataModel.get_day(self.datefrom)

        # Start a new thread and look for files that match the dates and directories
        # Needs a new thread so ProgressDialog and LogFetcher can run in parallel.
        # Otherwise ProgressDialog would wait for LogFetcher to complete before running
        lfthread = LogFetcherThread(picked_datefrom, picked_dateto)

        # Show a popup with a progress bar
        PopupDialog.ProgressDialog(self, lfthread, message = "Searching for files...")
        self.UpdateFilelist()
Example #12
def main():
    print "Loading data..."
    data_model = DataModel()
    input_size = len(data_model.top_words)
    output_size = len(data_model.get_genres())

    #    simple_history = evaluate_model(create_simple_model(input_size, output_size), data_model, "Simple")
    #    tanh_history = evaluate_model(create_one_layer_tanh_model(input_size, output_size), data_model, "One Layer tanh")

    hidden_sizes = [i * 10 for i in range(7, 11)]
    for hidden_size in hidden_sizes:
        HIDDEN_SIZE = hidden_size
        sigmoid_history = evaluate_model(
            create_one_layer_sigmoid_model(input_size, output_size),
            data_model, "One Layer sigmoid")
Example #13
    def setUp(self):
        self.data = [['Martinez', 'Emmanuel', 'Male', 'Teal', '06/26/1996'],
                     ['Flores', 'Jenipher', 'Female', 'Black', '11/10/1994'],
                     ['Lomeli', 'Fernando', 'Male', 'Red', '06/26/1993'],
                     ['Fonseca', 'Adrian', 'Male', 'Black', '11/10/1980']]

        birthdate_data = OrderedDict([
            ("LastName", ["Fonseca", "Lomeli", "Flores", "Martinez"]),
            ("FirstName", ["Adrian", "Fernando", "Jenipher", "Emmanuel"]),
            ("Gender", ["Male", "Male", "Female", "Male"]),
            ("FavoriteColor", ["Black", "Red", "Black", "Teal"]),
            ("DateOfBirth", ["11/10/80", "06/26/93", "11/10/94", "06/26/96"])
        ])

        self.birthdate_data_frame = pd.DataFrame(birthdate_data,
                                                 columns=birthdate_data.keys())

        gender_data = OrderedDict([
            ("LastName", ["Flores", "Fonseca", "Lomeli", "Martinez"]),
            ("FirstName", ["Jenipher", "Adrian", "Fernando", "Emmanuel"]),
            ("Gender", ["Female", "Male", "Male", "Male"]),
            ("FavoriteColor", ["Black", "Black", "Red", "Teal"]),
            ("DateOfBirth",
             ["11/10/1994", "11/10/1980", "06/26/1993", "06/26/1996"])
        ])

        self.gender_data_frame = pd.DataFrame(gender_data,
                                              columns=gender_data.keys())

        name_data = OrderedDict([
            ("LastName", ["Flores", "Fonseca", "Lomeli", "Martinez"]),
            ("FirstName", ["Jenipher", "Adrian", "Fernando", "Emmanuel"]),
            ("Gender", ["Female", "Male", "Male", "Male"]),
            ("FavoriteColor", ["Black", "Black", "Red", "Teal"]),
            ("DateOfBirth",
             ["11/10/1994", "11/10/1980", "06/26/1993", "06/26/1996"])
        ])

        self.name_data_frame = pd.DataFrame(name_data,
                                            columns=name_data.keys())

        self.data_frame = pd.DataFrame(self.data,
                                       columns=[
                                           'LastName', 'FirstName', 'Gender',
                                           'FavoriteColor', 'DateOfBirth'
                                       ])
        self.input_path_list = ['testdata1.csv', 'testdata2.csv']
        self.data_model_object = DataModel(self.input_path_list)
Example #14
 def UpdateServerInfo(self, si):
     return self.sendAndWaitRsp({
         "update_server": {
             "cookie": DataModel.Instance().GetCookie(),
             "si": si
         }
     })
Example #15
 def DeleteServers(self, rids):
     return self.sendAndWaitRsp({
         "delete_servers": {
             "cookie": DataModel.Instance().GetCookie(),
             "rids": rids
         }
     })
Example #16
def main(args):
    model = DataModel(
        url=
        'https://s.taobao.com/search?q=%E9%9B%B6%E9%A3%9F&imgfile=&commend=all&ssid=s5-e&search_type=item&sourceId=tb.index&spm=a21bo.50862.201856-taobao-item.1&ie=utf8&initiative_id=tbindexz_20161104&sort=sale-desc',
        pattern=
        '"raw_title":"([^"]*)","pic_url":"([^"]*)","detail_url":"([^"]*)","view_price":"([^"]*)","view_fee":"[^"]*","item_loc":"[^"]*","reserve_price":"[^"]*","view_sales":"(\d+)人付款","comment_count":"(\d+)"',
        resultFileName='result.html')
Example #17
 def __init__(self):
     self.dataModel = DataModel('./images', './segmaps')
     self.dataModel.getImageList()
     self.opStack=[]
     self.app = wx.App()
     self.view = View(None)
     self.view.setFileNameList(self.dataModel.imageNameList)
     self.TOOL_STEM_NEW = 10
     self.TOOL_STEM_ADD = 20
     self.TOOL_STEM_MINUS = 30
     self.TOOL_LEAF_NEW = 40
     self.TOOL_LEAF_ADD = 50
     self.TOOL_LEAF_MINUS = 60
     self.TOOL_LEAF_LASSO_MINUS = 61
     self.TOOL_ZOOM_IN = 70
     self.TOOL_ZOOM_OUT = 80
     self.TOOL_ZOOM_BACK = 90
     self.curTool = self.TOOL_STEM_NEW
     self.curImageIdx = 0
     self.curImageName = self.dataModel.imageNameList[self.curImageIdx]
     self.curImage = self.dataModel.getImageByIdx(self.curImageIdx)
     self.curSegmap = Segmap(self.dataModel.getSegmapByIdx(self.curImageIdx), self.dataModel.loadDependcy(self.curImageName))
     self.curThreshold = self.view.thresholdSlider.GetValue()
     self.curSorghumId = None
     self.curLeafId = None
     self.curSelectedId = -1
     self.curSelectedType = 'sorghum'
     self.curZoom = 1.0
     self.leafSA = SelectedArea(np.zeros((self.curSegmap.height, self.curSegmap.width)), type=SelectedArea.TYPE_LEAF)
     self.stemSA = SelectedArea(np.zeros((self.curSegmap.height, self.curSegmap.width)), type=SelectedArea.TYPE_STEM)
     eventManager.Register(self.OnCanvasLeftDown, wx.EVT_LEFT_DOWN, self.view.canvasPanel)
     eventManager.Register(self.OnCanvasMotion, wx.EVT_MOTION, self.view.canvasPanel)
     eventManager.Register(self.OnCanvasLeftUp, wx.EVT_LEFT_UP, self.view.canvasPanel)
     eventManager.Register(self.OnToolChange, wx.EVT_TOOL, self.view.verToolBar)
     eventManager.Register(self.OnThresholdSliderChange, wx.EVT_SLIDER, self.view.thresholdSlider)
     eventManager.Register(self.OnThresholdTextChange, wx.EVT_TEXT_ENTER, self.view.thresholdTextCtrl)
     eventManager.Register(self.OnFileChange, wx.EVT_LISTBOX, self.view.fileListBox)
     eventManager.Register(self.OnTreeKeyDown, wx.EVT_KEY_DOWN, self.view.sorghumTreeCtrl)
     eventManager.Register(self.OnTreeRightDown, wx.EVT_TREE_SEL_CHANGED, self.view.sorghumTreeCtrl)
     self.logger = logging.getLogger('Controller')
     self.logger.setLevel(logging.DEBUG)
     ch = logging.StreamHandler()
     ch.setLevel(logging.DEBUG)
     self.logger.addHandler(ch)
     self.logger.info('init')
     self.init()
Example #18
class DataCache:
    #def __init__(self, person_id, first_date, last_date):
    def __init__(self, username, first_date, last_date):
        self.dm = DataModel(
            username
        )  #After integration, this will take person_id, not the other way around.
        self.person_id = self.dm.person_id
        self.first_date = first_date
        self.last_date = last_date

        self.cached_data = None
        self.update_cache()

    def update_cache(self):
        date_array = DataCache.get_date_array(self.first_date, self.last_date)
        self.cached_data = self.dm.get_records_from_dates(
            self.person_id, date_array)

    def set_dates_and_update_cache_if_necessary(self, new_first_date,
                                                new_last_date):
        refresh_data = False

        if new_first_date < self.first_date:
            refresh_data = True
            self.first_date = new_first_date - dt.timedelta(days=DATA_BUFFER)

        if new_last_date > self.last_date:
            refresh_data = True
            self.last_date = new_last_date + dt.timedelta(days=DATA_BUFFER)

        if refresh_data:
            self.update_cache()

    def get_serialized_data(self, first_date, last_date):
        dates = DataCache.get_date_array(first_date, last_date)
        relevant_data = [
            record for record in self.cached_data
            if dt.date.fromisoformat(record['date']) in dates
        ]

        return dumps(relevant_data)

    def reset(self, first_date, last_date):
        self.first_date = first_date
        self.last_date = last_date
        self.update_cache()

    #TODO: This should maybe go somewhere else, like a utilities folder
    @staticmethod
    def get_date_array(start, end):
        if not (isinstance(start, dt.date) and isinstance(end, dt.date)):
            raise TypeError("start and end must be Dates")

        if start > end:
            raise ValueError("end must come after start.")

        date_del = end - start
        return [start + dt.timedelta(days=i) for i in range(date_del.days + 1)]
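A minimal usage sketch of the get_date_array helper defined above; the dates are arbitrary illustrations:

    import datetime as dt

    dates = DataCache.get_date_array(dt.date(2021, 1, 1), dt.date(2021, 1, 3))
    # dates == [dt.date(2021, 1, 1), dt.date(2021, 1, 2), dt.date(2021, 1, 3)]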
Example #19
    def create_work_item_list(
            self, data_model: DataModel,
            table_model: SessionPlanTableModel) -> [WorkItem]:
        """Create the list of work items from the session plan"""

        result: [WorkItem] = []
        model_rows: int = table_model.rowCount(
            QModelIndex(), ) if data_model.get_use_filter_wheel() else 1
        model_columns: int = table_model.columnCount(QModelIndex())

        # Every combination of row and column with a nonzero entry is a work item
        for row_index in range(model_rows):
            for column_index in range(model_columns):
                index = table_model.createIndex(row_index, column_index)
                cell_value = int(table_model.data(index, Qt.DisplayRole))
                if cell_value != 0:
                    raw_row_index: int = data_model.map_display_to_raw_filter_index(
                        row_index)
                    raw_column_index: int = data_model.map_display_to_raw_binning_index(
                        column_index)
                    filter_spec: FilterSpec = data_model.get_filter_specs(
                    )[raw_row_index]

                    binning: BinningSpec = data_model.get_binning_specs(
                    )[raw_column_index]

                    work_item = WorkItem(cell_value, filter_spec,
                                         binning.get_binning_value(),
                                         data_model.get_target_adus(),
                                         data_model.get_adu_tolerance(),
                                         self._preferences)
                    result.append(work_item)
        return result
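For illustration (the counts are made up): with two filter rows and two binning columns whose cells hold the frame counts [[5, 0], [0, 3]], only the two nonzero cells become WorkItems, one requesting 5 frames for the first filter/binning pair and one requesting 3 frames for the second; zero cells are skipped.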
Example #20
 def describe_group(data_model: DataModel, number_files: int,
                    sample_file: FileDescriptor, console: Console):
     binning = sample_file.get_binning()
     exposure = sample_file.get_exposure()
     temperature = sample_file.get_temperature()
     processing_message = ""
     if data_model.get_group_by_size():
         processing_message += f"binned {binning} x {binning}"
     if data_model.get_group_by_exposure():
         if len(processing_message) > 0:
             processing_message += ","
         processing_message += f" exposed {exposure} seconds"
     if data_model.get_group_by_temperature():
         if len(processing_message) > 0:
             processing_message += ","
         processing_message += f" at {temperature} degrees."
     console.message(
         f"Processing {number_files} files {processing_message}", +1)
Example #21
 def QueryPms(self):
     try:
         return self.sendAndWaitRsp(
             {"query_pms": {
                 "cookie": DataModel.Instance().GetCookie()
             }})
     except Exception as e:
         print(e)
         return None
Example #22
def main():
    print "Loading data..."
    data_model = DataModel()
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    print "Training classifier..."
    clf.fit(data_model.train_X, data_model.train_y)
    print "Evaluating classifier..."
    print "Average Test Accuracy", clf.score(data_model.test_X,
                                             data_model.test_y)
Example #23
    def buildModel(self, pathName, fileName):
        self.vocSize = len(self.wordDict)

        for word in self.hamWordList:
            if not self.wordDict.__contains__(word):
                self.hamRemoveSize += 1
            if self.hamDict.__contains__(word):
                self.hamDict[word] += 1
            else:
                self.hamDict[word] = 1

        for word in self.spamWordList:
            if not self.wordDict.__contains__(word):
                self.spamRemoveSize += 1
            if self.spamDict.__contains__(word):
                self.spamDict[word] += 1
            else:
                self.spamDict[word] = 1
        if self.hamRemoveSize != 0:
            hamCounts = len(self.hamWordList) - self.hamRemoveSize
        else:
            hamCounts = len(self.hamWordList)
        if self.spamRemoveSize != 0:
            spamCounts = len(self.spamWordList) - self.spamRemoveSize
        else:
            spamCounts = len(self.spamWordList)

        count = 1
        for word in self.wordDict:
            if self.modelDict.__contains__(word):
                continue
            lineNum = count
            curWord = word
            hamFrenquency = 0
            if self.hamDict.__contains__(word):
                hamFrenquency = self.hamDict.get(word)
            conHamPro = (hamFrenquency +
                         self.delta) / (hamCounts + self.delta * self.vocSize)
            spamFrenquency = 0
            if self.spamDict.__contains__(word):
                spamFrenquency = self.spamDict.get(word)
            conSpamPro = (spamFrenquency + self.delta) / (
                spamCounts + self.delta * self.vocSize)
            curModel = DataModel(lineNum, curWord, hamFrenquency, conHamPro,
                                 spamFrenquency, conSpamPro)
            self.modelDict[word] = curModel
            count += 1
        path = pathName
        name = fileName
        fileName = os.path.join(path, name)
        f = open(fileName, 'w', encoding='latin-1', errors='ignore')

        for model in self.modelDict.values():
            line = self.parser.modelParser(model)
            f.write(line + '\n')
        f.close()
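The two conditional probabilities computed above follow standard additive (Lidstone/Laplace) smoothing. As a purely hypothetical illustration: with delta = 0.5, a vocabulary of 10,000 words, and 50,000 ham tokens, a word seen 3 times in ham gets a conditional probability of (3 + 0.5) / (50000 + 0.5 * 10000) ≈ 6.4e-5.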
Example #24
 def QueryServerInfo(self, rids):
     try:
         return self.sendAndWaitRsp({
             "query_server": {
                 "cookie": DataModel.Instance().GetCookie(),
                 "rids": rids
             }
         })
     except Exception as e:
         traceback.print_exc()
Example #25
 def __init__(self):
     '''
     Constructor
     '''        
     self.connected_switches = []
     self.discovery_timer = utils.Timer(Discovery.DISCOVERY_INTERVAL, self._send_discovery_packets, [], True)
     self.remove_expired_timer = utils.Timer(Discovery.REMOVE_EXPIRED_INTERVAL, self._remove_expired_links, [], True)
     self.graph = DataModel()
     self.handlers = []
     self.change_lock = Lock()
Example #26
    def original_non_grouped_processing(self, selected_files: [FileDescriptor],
                                        data_model: DataModel,
                                        output_file: str, console: Console):
        """
        Process one set of files to a single output file.
        Output to the given path, if provided.  If not provided, prompt the user for it.
        :param selected_files:      List of descriptions of files to be combined
        :param data_model:          Data model that gives combination method and other options
        :param output_file:         Path for the combined output file
        :param console:             Re-directable console output object
        """
        console.push_level()
        console.message("Using single-file processing", +1)
        # We'll use the first file in the list as a sample for things like image size
        assert len(selected_files) > 0
        # Confirm that these are all flat frames, and can be combined (same binning and dimensions)
        if FileCombiner.all_compatible_sizes(selected_files):
            self.check_cancellation()
            if data_model.get_ignore_file_type() or FileCombiner.all_of_type(
                    selected_files, FileDescriptor.FILE_TYPE_FLAT):
                # Get (most common) filter name in the set
                # What filter should we put in the metadata for the output file?
                filter_name = SharedUtils.most_common_filter_name(
                    selected_files)

                # Do the combination
                self.combine_files(selected_files, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                substituted_folder_name = SharedUtils.substitute_date_time_filter_in_string(
                    data_model.get_disposition_subfolder_name())
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    substituted_folder_name, selected_files, console)
            else:
                raise MasterMakerExceptions.NotAllFlatFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes
        console.message("Combining complete", 0)
        console.pop_level()
Example #27
    def process_one_group(self, data_model: DataModel,
                          descriptor_list: [FileDescriptor],
                          output_directory: str, combine_method: int,
                          disposition_folder_name, console: Console):
        assert len(descriptor_list) > 0
        sample_file: FileDescriptor = descriptor_list[0]
        console.push_level()
        self.describe_group(data_model, len(descriptor_list), sample_file,
                            console)

        # Make up a file name for this group's output, into the given directory
        file_name = SharedUtils.get_file_name_portion(
            combine_method, sample_file, data_model.get_sigma_clip_threshold(),
            data_model.get_min_max_number_clipped_per_end())
        output_file = f"{output_directory}/{file_name}"

        # Confirm that these are all dark frames, and can be combined (same binning and dimensions)
        if self.all_compatible_sizes(descriptor_list):
            if data_model.get_ignore_file_type() \
                    or FileCombiner.all_of_type(descriptor_list, FileDescriptor.FILE_TYPE_DARK):
                # Get (most common) filter name in the set
                # Since these are darks, the filter is meaningless, but we need the value
                # for the shared "create file" routine
                filter_name = SharedUtils.most_common_filter_name(
                    descriptor_list)

                # Do the combination
                self.combine_files(descriptor_list, data_model, filter_name,
                                   output_file, console)
                self.check_cancellation()
                # Files are combined.  Put away the inputs?
                # Return list of any that were moved, in case the UI needs to be adjusted
                self.handle_input_files_disposition(
                    data_model.get_input_file_disposition(),
                    disposition_folder_name, descriptor_list, console)
                self.check_cancellation()
            else:
                raise MasterMakerExceptions.NotAllDarkFrames
        else:
            raise MasterMakerExceptions.IncompatibleSizes
        console.pop_level()
Example #28
 def onClieckLogin(self):
     rlt = RestfulApiClient().Login(self.editUsr.text(),self.editPassword.text())
     if not rlt or rlt["login_result"]["result"] != "success":
         self.failedTimes += 1
         self.labelToolTip.show()
         self.labelToolTip.setStyleSheet("color:red;font-size:18px")
         self.labelToolTip.setText("login failed, please input again")
         self.adjustSize()
         if self.failedTimes > 3: return self.reject()
     else:
         DataModel.Instance().UpdateUser(rlt["login_result"])
         return self.accept()
Example #29
 def QueryUpdateDetail(self, prj, module, ver):
     try:
         return self.sendAndWaitRsp({
             "query_update_detail": {
                 "cookie": DataModel.Instance().GetCookie(),
                 "project": prj,
                 "module": module,
                 "version": ver
             }
         })
     except Exception as e:
         traceback.print_exc()
Example #30
 def QueryMi(self, prj, module):
     try:
         return self.sendAndWaitRsp({
             "query_mi": {
                 "cookie": DataModel.Instance().GetCookie(),
                 "project": prj,
                 "module": module
             }
         })
     except Exception as e:
         traceback.print_exc()
         return None
Example #31
def updateTableVisualisation(intermediateValue, idColumnValue, timeColumnValue,
                             valueColumnValue):
    if idColumnValue is not None and timeColumnValue is not None and valueColumnValue is not None:
        #print(idColumnValue, timeColumnValue, valueColumnValue)

        if intermediateValue is not None:
            translog_raw = pd.read_json(intermediateValue, orient='split')
            preprocessor = DataModel(transaction_log=translog_raw,
                                     cust_id=idColumnValue,
                                     timestamp=timeColumnValue,
                                     amount_spent=valueColumnValue)
            translog = preprocessor.get_translog()
            translog_by_cohort = preprocessor.aggregate_translog_by_cohort(
                trans_log=translog, dep_var="amount_spent")
            translog_by_cohort = preprocessor.compute_cohort_ages(
                trans_log=translog_by_cohort,
                acq_timestamp="cohort",
                order_timestamp="timestamp",
                by="month")
            # plt = preprocessor.plot_linechart(
            #     cohort_trans_log=translog_by_cohort,
            #     dep_var="amount_spent",
            #     view="cohort-age"
            # )
            plt = preprocessor.plot_c3(cohort_trans_log=translog_by_cohort,
                                       dep_var="amount_spent")
            return dcc.Graph(id='cohort_data',
                             figure=plt), translog.to_json(date_format='iso',
                                                           orient='split')
Example #32
 def describe_group(data_model: DataModel, number_files: int,
                    sample_file: FileDescriptor, console: Console):
     """
     Display, on the console, a descriptive text string for the group being processed, using a given sample file
     :param data_model:      Data model giving the processing options
     :param number_files:    Number of files in the group being processed
     :param sample_file:     Sample file, representative of the characteristics of files in the group
     :param console:         Redirectable output console
     """
     binning = sample_file.get_binning()
     temperature = sample_file.get_temperature()
     message_parts: [str] = []
     if data_model.get_group_by_size():
         message_parts.append(f"binned {binning} x {binning}")
     if data_model.get_group_by_filter():
         message_parts.append(
             f"with {sample_file.get_filter_name()} filter")
     if data_model.get_group_by_temperature():
         message_parts.append(f"at {temperature} degrees")
     processing_message = ", ".join(message_parts)
     console.message(
         f"Processing {number_files} files {processing_message}.", +1)
Example #33
    def validate_file_dimensions(cls, descriptors: [FileDescriptor],
                                 data_model: DataModel) -> bool:
        """
        Determine if the dimensions of all the supplied files are the same.
        All selected files must be the same size and the same binning.
        Include the precalibration bias or dark file in this test if that method is selected.

        :param descriptors:     Files to be checked for compatibility
        :param data_model:      Data model gives precalibration type and file if needed
        :return:                True if all files are the same size and binning, so compatible
        """
        # Get list of paths of selected files
        if len(descriptors) > 0:

            # If precalibration file is in use, add that name to the list
            if data_model.get_precalibration_type(
            ) == Constants.CALIBRATION_FIXED_FILE:
                calibration_descriptor = \
                    RmFitsUtil.make_file_descriptor(data_model.get_precalibration_fixed_path())
                descriptors.append(calibration_descriptor)

            # Get binning and dimension of first to use as a reference
            assert len(descriptors) > 0
            reference_file: FileDescriptor = descriptors[0]
            reference_binning = reference_file.get_binning()
            reference_x_size = reference_file.get_x_dimension()
            reference_y_size = reference_file.get_y_dimension()

            # Check all files in the list against these specifications
            descriptor: FileDescriptor
            for descriptor in descriptors:
                if descriptor.get_binning() != reference_binning:
                    return False
                if descriptor.get_x_dimension() != reference_x_size:
                    return False
                if descriptor.get_y_dimension() != reference_y_size:
                    return False

        return True
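A minimal usage sketch of the classmethod above; the owning class name (FileCombiner) and the file paths are assumptions made for illustration only:

    descriptors = [RmFitsUtil.make_file_descriptor(path)
                   for path in ("dark-001.fits", "dark-002.fits")]  # hypothetical paths
    if not FileCombiner.validate_file_dimensions(descriptors, data_model):
        raise MasterMakerExceptions.IncompatibleSizes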
Example #34
 def Publish(self, prj, module, ver, gids, detail, code, hash, url):
     return self.sendAndWaitRsp({
         "publish": {
             "cookie": DataModel.Instance().GetCookie(),
             "project": prj,
             "module": module,
             "version": ver,
             "gids": gids,
             "detail": detail,
             "code": code,
             "hash": hash,
             "url": url
         }
     })
Example #35
def uploader():
    """
    Basic file uploading using a form.
    Render template form if method is GET
    Create image and place in template if method is POST
    :return: rendered templates
    """

    if request.method == 'GET':
        # default response when a form is called. Renders 'form/form_file_upload.html'
        return render_template('form_file_upload.html')

    elif request.method == 'POST':
        # response when the submit button is clicked in the 'form/form_file_upload.html'

        # get file from request object
        f = request.files['file']

        # Creates a DataModel object and calls the bar.plot() method passing the file from the file upload
        x = DataModel()
        results = x.bar_plot(f)

        return render_template('draw_form.html', results=results)
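The example does not show how this view is registered; if it is a Flask view (as the request/render_template usage suggests), the wiring might look like the sketch below, where the app object and the '/upload' URL rule are assumptions:

    from flask import Flask

    app = Flask(__name__)
    # Hypothetical registration of the uploader view defined above.
    app.add_url_rule('/upload', view_func=uploader, methods=['GET', 'POST'])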
Example #36
 def new_menu_triggered(self):
     """Respond to 'new' menu by opening a new default plan file"""
     print("new_menu_triggered")
     # Protected save in case unsaved changes will get wiped
     self.protect_unsaved_close()
     # Get a new data model with default values, load those defaults into
     # the in-use model
     new_model = DataModel.make_from_preferences(self._preferences)
     self._data_model.load_from_model(new_model)
     # Populate window
     self.set_ui_from_data_model(self._data_model)
     # Set window title to unsaved
     self.ui.setWindowTitle(Constants.UNSAVED_WINDOW_TITLE)
     self.set_is_dirty(False)
Example #37
    def OnSortColumnBySelection(self, event):
        '''
        When the user selects "Sort by..." sort the filelist by the user's choice
        :param event: wxpython event
        :return: None
        '''
        # sort_selection has 3 choices:
        # 0 = sort by file name, 1 = sort by date, 2 = sort by path
        sort_selection = event.GetEventObject().GetCurrentSelection()
        formatted_matchingfiles = DataModel.sortColumn(sort_selection)

        # Clear the filelist and update it with the sorted matchingfiles
        self.filelist.DeleteAllItems()
        for myfile in formatted_matchingfiles:
            self.filelist.AppendItem(myfile)
Example #38
    def InitDateUI(self):
        hsizer = wx.BoxSizer(wx.HORIZONTAL)

        ''' INITIALIZE DatePickers '''
        self.datefrom = wx.DatePickerCtrl(self, -1, style = wx.DP_DROPDOWN | wx.DP_SHOWCENTURY, size = (135, -1))
        self.dateto = wx.DatePickerCtrl(self, -1, style = wx.DP_DROPDOWN | wx.DP_SHOWCENTURY, size = (146, -1))

        """ set default day"""
        datefrom_date = wx.DateTime()
        yesterday_date = DataModel.get_prevday()
        datefrom_date.Set(yesterday_date.day, yesterday_date.month - 1, yesterday_date.year)

        self.datefrom.SetValue(datefrom_date)
        self.label = wx.StaticText(self, -1, "")

        # initialize date from and date to labels
        datefromlabel = wx.StaticText(self, -1, label = "From: ", size = (40, -1))
        datetolabel = wx.StaticText(self, -1, "To: ")


        # init Search for files button
        applybutton = wx.Button(self, -1, label = "Search for files")
        applybutton.Bind(wx.EVT_BUTTON, self.OnApply)
        applybutton.SetDefault()

        # init Save files button
        savebutton = wx.Button(self, -1, label = "Save selected files to folder")
        savebutton.Bind(wx.EVT_BUTTON, self.OnSave)

        ''' Add controls to Sizer '''
        hsizer.Add(datefromlabel, 0, wx.ALIGN_CENTER_VERTICAL)
        hsizer.Add(self.datefrom, 0)
        hsizer.Add((10, 0), 0)
        hsizer.Add(datetolabel, 0, wx.ALIGN_CENTER_VERTICAL)
        hsizer.Add(self.dateto, 0)
        hsizer.Add((10, 0), 1)

        hsizer.Add(applybutton, 0, wx.ALIGN_CENTER_VERTICAL)
        hsizer.AddSpacer((10, 0))
        hsizer.Add(savebutton, 0, wx.ALIGN_CENTER_VERTICAL)

        return hsizer
Example #39
 cfg = {
     "usr":
     "******",  # 登录UI用账号
     "password":
     "******",  # 登录UI用密码
     "pubUrl":
     "http://192.168.221.134:8210/",  # 发布后下载url路径
     "pubTopic":
     "publish",  # 
     "sftpAddr":
     [("192.168.221.134", 22, 'root', 'Baidu.com22')],  # sftp上传ip、端口、账号、密码
     "prjs": [
         # project name, module name, release version, directory of files to publish, file name prefix, release description
         ("aods", "aods-x64-win", "0.0.0.0001", ".\\aods-x64-win\\",
          "aods-x64-win", "fix the bug .1.023,1"),
         ("aods", "aodc-x64-win", "0.0.0.0001", ".\\aodc-x64-win\\",
          "aodc-x64-win", "fix the bug .1.023,1")
     ]
 }
 rlt = RestfulApiClient().Login(cfg['usr'], cfg['password'])  # switched to publishing messages via the API
 if not rlt or rlt["login_result"]["result"] != "success":
     Log(LOG_ERROR, "Publish", "Failed to login")
     sys.exit(1)
 DataModel.Instance().UpdateUser(rlt["login_result"])
 p = Publish(cfg["sftpAddr"], cfg["pubUrl"])
 for (prj, mn, ver, folder, pubFileName, detail) in cfg["prjs"]:
     if not p.Publish(prj, mn, ver, folder, pubFileName, detail):
         os.system("pause")
         sys.exit(1)
 print("执行结束......")
 time.sleep(20.0)
Example #40
class Discovery:
    '''
    This class learns the network topology and saves it in the DataModel class.
    It sends periodic (every DISCOVERY_INTERVAL) LLDP packets to all connected switches and updates the topology accordingly.
    In addition, it removes links from the graph when they have not been active for a timeout period, or when an indication of link loss occurs.
    '''
    __metaclass__ = utils.SingletonType
  
    LLDP_DST_ADDR  = '\x01\x80\xc2\x00\x00\x0e'
    DISCOVERY_INTERVAL = 1
    REMOVE_EXPIRED_INTERVAL = 3

    def __init__(self):
        '''
        Constructor
        '''        
        self.connected_switches = []
        self.discovery_timer = utils.Timer(Discovery.DISCOVERY_INTERVAL, self._send_discovery_packets, [], True)
        self.remove_expired_timer = utils.Timer(Discovery.REMOVE_EXPIRED_INTERVAL, self._remove_expired_links, [], True)
        self.graph = DataModel()
        self.handlers = []
        self.change_lock = Lock()
    
    def _remove_expired_links(self):
        expired_links=self.graph.get_expired_links()
        if len(expired_links) > 0:
            #log.debug('Discovery: removing %i expired links %s' %(len(expired_links),expired_links)) 
            for (a,port1,b,port2) in expired_links:
                if self.graph.link_is_dead(a, port1, b, port2):
                    log.debug('Link is removed due to [timeout]: (%i %i)<->(%i %i)' % (a,port1,b,port2))
            self._tree_changed()
    
    def _send_discovery_packets(self):
        '''
            sends discovery packets to all connected switches
        '''
        #log.debug('Discovery: sending LLDP to %i connected switches' % (len(self.connected_switches)) )        
        for switch_event in self.connected_switches:
            self.send_LLDP_to_switch(switch_event)

    def send_LLDP_to_switch(self,event):
        '''
        Send an LLDP packet out all of a switch's ports
        :param event: the switch ConnectionUp Event
        '''
        dst = Discovery.LLDP_DST_ADDR
        for p in event.ofp.ports:
            if p.port_no < of.OFPP_MAX:  # @UndefinedVariable
                # Build LLDP packet
                src = str(p.hw_addr)
                port = p.port_no

                lldp_p = lldp() # create LLDP payload
                ch_id=chassis_id() # Add switch ID part
                ch_id.fill(ch_id.SUB_LOCAL,bytes(hex(long(event.dpid))[2:-1])) # This works, the appendix example doesn't
                #ch_id.subtype=chassis_id.SUB_LOCAL
                #ch_id.id=event.dpid
                lldp_p.add_tlv(ch_id)
                po_id = port_id() # Add port ID part
                po_id.subtype = 2
                po_id.id = str(port)
                lldp_p.add_tlv(po_id)
                tt = ttl() # Add TTL
                tt.ttl = 1
                lldp_p.add_tlv(tt)
                lldp_p.add_tlv(end_tlv())
                
                ether = ethernet() # Create an Ethernet packet
                ether.type = ethernet.LLDP_TYPE # Set its type to LLDP
                ether.src = src # Set src, dst
                ether.dst = dst
                ether.payload = lldp_p # Set payload to be the LLDP payload
                
                # send LLDP packet
                pkt = of.ofp_packet_out(action = of.ofp_action_output(port = port))
                pkt.data = ether
                event.connection.send(pkt)  
            
    def _handle_ConnectionUp(self, event):
        '''
        Will be called when a switch is added.
        save the connection event in self.connected_switches 
        Use event.dpid for switch ID, and event.connection.send(...) to send messages to the switch.
        '''
        self.connected_switches.append(event)
        self.set_LLDP_rule(event.connection)
        log.debug('Discovery: switch %i connected'%(event.dpid))
        self.graph.switch_is_up(event.dpid)
        
    def set_LLDP_rule(self,connection):
        '''
        set a flow rule in the switch
        to pass all LLDP packets to the controller
        '''
        # should i delete old rules ?
                
        fm = of.ofp_flow_mod()
        fm.match.dl_type = ethernet.LLDP_TYPE
        fm.match.dl_dst = Discovery.LLDP_DST_ADDR

        # Add an action to send to the specified port
        action = of.ofp_action_output(port=of.OFPP_CONTROLLER)  # @UndefinedVariable
        fm.actions.append(action)
        # Send message to switch
        connection.send(fm)
        
    def _handle_ConnectionDown(self, event):
        '''
        Will be called when a switch goes down. Use event.dpid for switch ID.
        '''
        event_to_delete = [up_event for up_event in self.connected_switches if up_event.dpid == event.dpid][0]
        self.connected_switches.remove(event_to_delete)
        log.debug('Discovery: switch %i disconnected'%(event.dpid))
        
        removed_links=self.graph.switch_is_down(event.dpid)
        for (s1,p1,s2,p2) in removed_links:
            log.debug('Link is removed due to [switch %i is down]: (%i %i)<->(%i %i)' % (event.dpid,s1,p1,s2,p2))
        self._tree_changed()
        
    def _handle_PortStatus(self, event):
        '''
        Will be called when a link changes. Specifically, when event.ofp.desc.config is 1, it means that the link is down. Use event.dpid for switch ID and event.port for port number.
        '''
        dpid=event.dpid
        port=event.port
        if event.ofp.desc.config == 1:
            log.debug('[switch %i]: port %i was disconnected'%(dpid, port))
            links = self.graph.get_all_links_for_switch_and_port(dpid, port)
            for (s1,p1,s2,p2) in links:
                if self.graph.link_is_dead(s1, p1, s2, p2):
                    log.debug('Link is removed due to [port closed]: (%i %i)<->(%i %i)' % (s1,p1,s2,p2))
            if (len(links)>0):
                self._tree_changed()
            
                
    def _handle_PacketIn(self, event):
        '''
        Will be called when a packet is sent to the controller. Same as in the previous part. Use it to find LLDP packets (event.parsed.type == ethernet.LLDP_TYPE) and update the topology according to them.
        '''
        if event.parsed.type != ethernet.LLDP_TYPE:
            return
        
        pkt = event.parsed
        lldp_p = pkt.payload
        ch_id = lldp_p.tlvs[0]
        po_id = lldp_p.tlvs[1]
        src_dpid = int(ch_id.id)
        src_port = int(po_id.id)
        #log.debug("Received a LLDP packet on switch %i from %i" % (event.dpid,src_dpid))
        if self.graph.link_is_alive( src_dpid, src_port, event.dpid, event.port):
            #a new link
            log.debug("New link was found: (%i %i)<->(%i %i)" % (src_dpid,src_port,event.dpid,event.port))
        
        self._tree_changed()
            
        
    def register_tree_change(self,handler):
        self.handlers.append(handler)

    def _tree_changed(self):
        self.change_lock.acquire()
        try:
            allowed=self.graph.get_all_allowed_links()
            forbidden=self.graph.get_all_forbidden_links()
            to_add=self.graph.get_enteries_to_add()
            to_remove=self.graph.get_enteries_to_remove()
            for handler in self.handlers:
                handler.handle(allowed,forbidden)
            
            for (s1,p1,s2,p2) in to_add:
                self.graph.enteries_added(s1, p1, s2, p2)
                
            for (s1,p1,s2,p2) in to_remove:
                self.graph.enteries_removed(s1, p1, s2, p2)
        finally:    
            self.change_lock.release()