Example #1
 def __init__(self, frame):
     self.model = dm.DataModel()
     self.frame = frame
     self.buttons = list()
     self.spacing = 15
     self.declareButtons()
     self.popUp = list()
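
The `dm` prefix is not defined in this excerpt; presumably the data-model module is imported under an alias along these lines:

    import DataModel as dm  # assumed import; only the alias `dm` appears in the snippet
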
Example #2
 def reset_data(self):
     self.data_model = DataModel.DataModel()
     self.data_model.logger = Logger
     self.img = None
     self.texture = None
     self.texture_stars = None
     self.focal_length = 0
     self.crop_factor = 0
     self.img_list = None
     self.output_name = "aligned.tif"
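     # force a collection pass, presumably to free the large image buffers released above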
     gc.collect()
Example #3
	def __init__(self):
		## Configuration
		self.settings = configparser.ConfigParser()
		self.settings['settings'] = {}
		self.settings['hardware'] = { 'type': 'none' }
		self.settings['view'] = {
									'startFreq': '1000000',
									'stopFreq': '10000000',
									'showMarker': 'True',
									'colorScheme': '1',
									'dBDivIndex': '4',
									'numSamplesIndex': '2',
									'refLevel': '10'
								}

		self.model = DataModel.DataModel()
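
configparser keeps every value as a string, so these defaults are read back with the typed getters; a minimal, self-contained sketch:

    import configparser

    settings = configparser.ConfigParser()
    settings['view'] = {'startFreq': '1000000', 'showMarker': 'True'}
    start = settings.getint('view', 'startFreq')        # parsed to the int 1000000
    marker = settings.getboolean('view', 'showMarker')  # parsed to the bool True
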
Example #4
    def __init__(self):
        super(RaspberryController, self).__init__()

        self.instance = None

        self.setupUi(self)
        self.setup_actions()
        self.setup_openmic_controller()

        self.data_model = DataModel.DataModel(self.agentsComboBox,
                                              self.tasksComboBox,
                                              self.showTaskAgentsComboBox)

        self.initialize_ui()
        self.show()
Example #5
    def __init__(
            self, n=100000, border_value=1.5,
            input_parameters_below_border=(1,),
            input_parameters_above_border=(0.5,),
            output_parameters_below_border=(0.5,),
            output_parameters_above_border=(1,),
            input_intensity=(0.5,),
            output_intensity=(1,),
            input_distribution="exponential",
            output_distribution="exponential",
            input_time_distribution="exponential",
            output_time_distribution="exponential",
            ):

        # distribution law for the volumes of resource arrivals
        self._input_distribution = input_distribution

        # distribution law for the volumes of resource losses
        self._output_distribution = output_distribution

        # distribution laws for the lengths of the time intervals
        self._input_time_distribution = input_time_distribution
        self._output_time_distribution = output_time_distribution

        # parameters of the arrival-volume distribution (below and above the S-border)
        self._input_parameters_below_border = input_parameters_below_border
        self._input_parameters_above_border = input_parameters_above_border

        # parameters of the loss-volume distribution (below and above the S-border)
        self._output_parameters_below_border = output_parameters_below_border
        self._output_parameters_above_border = output_parameters_above_border

        # intensities (rate parameters) of the time-interval distributions
        self._input_intensity = input_intensity
        self._output_intensity = output_intensity

        # number of iterations
        self._n = n

        self._border_value = border_value

        self._data = DataModel.DataModel(n)
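
The enclosing class name is not shown in this excerpt; assuming a hypothetical name such as ResourceModel, construction might look like:

    # ResourceModel is a hypothetical name; the excerpt shows only __init__
    model = ResourceModel(n=50000, border_value=2.0)
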
Example #6
parser.add_argument('-f',
                    '--focalLength',
                    dest='focal',
                    type=float,
                    default=16.5,
                    help='Focal length (default: 16.5)')

args = parser.parse_args()

if args.debug:
    logging_level = logging.DEBUG
    logging_format = "%(asctime)s (%(name)s) [%(levelname)s] line %(lineno)d: %(message)s"
    logging.basicConfig(format=logging_format, level=logging_level)

keepInterim = args.keepInterim
data_model = DataModel.DataModel()
outputName = args.output

for p in reorder_images(args.images):
    logging.debug("image: %s", p)
    data_model.add_image(p)

ref_img = data_model.images[0]
f = args.focal
logging.debug("Setting focal length to %f", f)

img_shape = ref_img.fullsize_gray_image.shape
img_size = np.array([img_shape[1], img_shape[0]])
data_model.reset_result()

pts, vol = DataModel.ImageProcessing.detect_star_points(
    ref_img.fullsize_gray_image)  # argument assumed; the call is truncated in the source
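
The excerpt reads args.debug, args.keepInterim, args.output, and args.images without showing their declarations; a plausible reconstruction of the missing parser setup (the short flag spellings are assumptions) that would precede the lines above:

    parser.add_argument('-d', '--debug', action='store_true',
                        help='Enable debug logging')  # flag spelling assumed
    parser.add_argument('-k', '--keepInterim', action='store_true',
                        help='Keep intermediate files')  # flag spelling assumed
    parser.add_argument('-o', '--output',
                        help='Output file name')  # default not recoverable
    parser.add_argument('images', nargs='+', help='Input image files')
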
Example #7
@app.route("/records/gender", methods=["GET"])
def get_gender_records():
    gender_records = data_model.sort_by_gender()
    return gender_records.to_json(orient='records', force_ascii=False)


@app.route("/records/birthdate", methods=["GET"])
def get_bday_records():
    bday_records = data_model.sort_by_birthdate()
    return bday_records.to_json(orient='records', force_ascii=False)


@app.route("/records/name", methods=["GET"])
def get_name_records():
    name_records = data_model.sort_by_name()
    return name_records.to_json(orient='records', force_ascii=False)


@app.route("/records", methods=["POST"])
def add_record():
    line = request.get_data()
    if data_model.add_line_to_record(line):
        return data_model.records.to_json(orient='records', force_ascii=False)
    else:
        return "Invalid Format"


if __name__ == '__main__':
    input_path_list = CLI().prompt()
    data_model = dm.DataModel(input_path_list)
    print(data_model.records)
    app.run(port=8080)
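
With app.run(port=8080) as above, the GET endpoints can be exercised once the server is up; a minimal sketch using only the standard library (localhost assumed):

    import urllib.request

    # fetch the records sorted by name; port taken from app.run(port=8080)
    with urllib.request.urlopen('http://localhost:8080/records/name') as resp:
        print(resp.read().decode())
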
Example #8
def the_great_looper(key_list, stock_names_list, kws_list):
    start_timing = datetime.now()
    print(f'Start time: {start_timing}')
    # build one DataModel per (key, stock file, keyword list) triple

    # accumulate one row of results per experiment; saved to CSV at the end
    result = pd.DataFrame()

    experiment_number = 1

    iterations = 20
    number_of_data_models = len(key_list)

    total_experiment_count = iterations * number_of_data_models

    for key, stock_name, kws in zip(key_list, stock_names_list, kws_list):

        # form the data model
        m = DataModel.DataModel(
            f'{key}_False_by_week.csv',  # the trend file
            f'{stock_name}.csv',  # the stock file
            'close_open',
            'somefilenamehere')

        # let's loop around the weeks to predict....also, but later
        # loop around number of epochs and learning rate, but later

        m.form_X_y(weeks_to_predict=8,
                   scaled=False,
                   div_100=True,
                   flatten=False)

        for _ in range(iterations):

            print(
                f'running experiment {experiment_number} / {total_experiment_count}'
            )
            train_loader, val_loader = pyTorchModel.get_train_and_test_data_loader_from_data_model(
                m, percentage_in_train_set=0.7)

            lr = 0.001
            n_epochs = 600
            kernel_size = 3

            trained_model, train_losses, val_losses = pyTorchModel.train_kit(
                train_loader,
                val_loader,
                number_of_kws=len(m.keywords),
                kernel_size=kernel_size,
                pool_size=1,
                weeks_to_predict=m.weeks_to_predict,
                lr=lr,
                n_epochs=n_epochs)

            # the train_kit should return the model and train losses and val losses
            total_accuracy, one_zero_accuracy, zero_one_accuracy = pyTorchModel.eval_kit(
                val_loader, trained_model)

            print(
                f'avg train losses is {sum(train_losses) / len(train_losses)}')
            print(f'avg val losses is {sum(val_losses) / len(val_losses)}')

            # document the result
            # result_cols = ['key', 'kws', 'weeks_to_predict', 'train_loss', 'val_loss', 'lr', 'n_epochs',
            #            'total_accuracy', 'one_zero_accuracy', 'zero_one_accuracy'],
            result.loc[experiment_number, 'key'] = key
            result.loc[experiment_number, 'kws'] = kws
            result.loc[experiment_number,
                       'weeks_to_predict'] = m.weeks_to_predict
            result.loc[experiment_number, 'train_loss'] = float(
                sum(train_losses) / len(train_losses))
            result.loc[experiment_number,
                       'val_loss'] = float(sum(val_losses) / len(val_losses))
            result.loc[experiment_number, 'lr'] = lr
            result.loc[experiment_number, 'n_epochs'] = n_epochs
            result.loc[experiment_number, 'total_accuracy'] = total_accuracy
            result.loc[experiment_number,
                       'one_zero_accuracy'] = one_zero_accuracy
            result.loc[experiment_number,
                       'zero_one_accuracy'] = zero_one_accuracy
            result.loc[experiment_number, 'kernel_size'] = kernel_size
            experiment_number += 1

    result.to_csv(
        os.path.join(general_path(), 'result7_only_bio_meaningful.csv'))
    end_timing = datetime.now()
    print(f'time taken: {end_timing - start_timing}')
    return result
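
Because the three inputs are consumed with zip, they must be parallel lists of equal length; a hypothetical call, with every file-name stem invented for illustration:

    # key_list[i] selects '<key>_False_by_week.csv', stock_names_list[i]
    # selects '<stock>.csv', and kws_list[i] is the matching keyword list
    result = the_great_looper(key_list=['bio'],
                              stock_names_list=['XBI'],
                              kws_list=[['vaccine', 'fda']])
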