def main():
    validation = Validation('standard_data.json')
    invalid = collections.defaultdict(set)

    script_dir = os.path.dirname(__file__)
    filename_in = os.path.join(script_dir, 'budapest_hungary.osm')
    for _, element in ET.iterparse(filename_in):
        address = get_address(element)
        if address is not None:

            # validate each key, one by one
            for key in address.keys():
                value = address[key]

                # skip values already known to be invalid
                if value not in invalid[key]:

                    # if invalid
                    if validation.validate(key, value) is False:

                        # store it to make sure we do not repeat this report
                        invalid[key].add(value)

                        # report the invalid value
                        print 'Invalid {0}: "{1}"'\
                            .format(key, value.encode('utf-8'))
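
The get_address helper used above is not shown; a minimal sketch of what it might look like, assuming standard xml.etree.ElementTree elements whose addr:* values live in child <tag k="..." v="..."> nodes (the helper name and tag layout are assumptions):

def get_address(element):
    # Hypothetical helper: collect the addr:* sub-tags of an OSM node or way.
    if element.tag not in ('node', 'way'):
        return None
    address = {}
    for tag in element.iter('tag'):
        key = tag.attrib.get('k', '')
        if key.startswith('addr:'):
            address[key[len('addr:'):]] = tag.attrib.get('v')
    return address if address else None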
Example #2
def build(save = False):
    infos = AllFolds()
    val = Validation(infos)
    val.buildPredictors()
    if save:
        val.savePredictors()
    return val 
Example #3
	def add(self, name, space):
		name = name + '.app'

		validate = Validation()
		if validate.matchAgainstDirectory(name):
			self.storage.addApplication(name, space)
			return

		fuzzy_result = validate.fuzzyAgainstDirectory(name)
		if fuzzy_result is not None:
			self.storage.addApplication(fuzzy_result, space)
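
matchAgainstDirectory and fuzzyAgainstDirectory are not shown; one plausible way to implement the fuzzy lookup is difflib.get_close_matches over the contents of the applications folder (the '/Applications' path and both method bodies are assumptions):

import difflib
import os

class Validation:
    APPS_DIR = '/Applications'  # assumed location of installed app bundles

    def matchAgainstDirectory(self, name):
        # Exact match against the installed application bundles.
        return name in os.listdir(self.APPS_DIR)

    def fuzzyAgainstDirectory(self, name):
        # Closest bundle name, or None if nothing is similar enough.
        matches = difflib.get_close_matches(name, os.listdir(self.APPS_DIR), n=1, cutoff=0.6)
        return matches[0] if matches else None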
    def __init__(self,
                 data=None,
                 model=None,
                 training_data_span_months=1):
        self._data_df = data
        self._model = model
        self.training_data_span_months = training_data_span_months
        self.validation = Validation(data)

    def fit(self, training_data):
        # self.training_data = training_data
        self.validation = Validation(training_data)
        self._training_data = training_data

        # Want to go for minimum mean training error * std.dev

        if self.should_optimize:
            optimized_args = self.optimize_interpolator()
            self.x_offset = optimized_args[0]
            self.x_divisor = optimized_args[1]
Example #6
	def __init__(self):
		#importing the client name and raw table name passed as parameters
		clientName = sys.argv[1]
		fileName = sys.argv[2]
		
		#if needed to give filepath and validate a file not in the same folder
		#fileNamewithPath = sys.argv[2]
		#tempfileName = re.findall('[A-Za-z0-9_-\s]+.csv',fileNamewithPath)
		#fileName = tempfileName[0]
		
		fileInfo = FileInfo()
		fileInfo.setFileInfo(fileName)
		sortFileType = SortFileType()
		fileType,rawTableName = sortFileType.getFileType(clientName,fileName)
		#print fileType
		#print rawTableName
		
		fileReader = FileReader(fileName)
		tableMetadata = TableMetadata()
		validation = Validation()
		error = None
		
		fileColumns = fileReader.readHeader()
		dbColumns = tableMetadata.readTableHeader(clientName,rawTableName)
		
		if validation.validateHeaders(dbColumns,fileColumns):
			dataType = tableMetadata.readMetadata(clientName,rawTableName)
			fileRows = fileReader.readRows()
			
			self.rowNum = 0
			for eachRow in fileRows:
				self.rowNum = self.rowNum + 1
				if validation.validateRow(dataType,eachRow,self.rowNum) is False:
					error = "Error in dataType"
					break	
		else:
			error =  "Error in header"
		
		if error:
			print error
		else:	
			print"File sucessfully validated"
Example #7
def signup():
    if request.method == "POST":
        un = request.form['username']
        email = request.form['email']
        password = request.form['password']

        v = Validation()
        if v.check_validation(un, email, password):
            # put the user in the db
            u = models.User(username=un, email=email, password=password)
            db.session.add(u)
            db.session.commit()
            # render the template where the user is logged in
            return render_template("user_logged.html")
        else:
            #validation did not succeed, showing the user a flash message
            flash("Sorry, but some of your input is not enough secured")
            return render_template("signup.html")

    #get request for signup
    return render_template("signup.html")
Example #8
    def __init__(self):

        # Instantiate a data access object
        # Contains methods to access the database
        self.staff_dao = StaffDAO()

        # Instantiate a validation object
        # Contains methods to validate input fields
        self.validator = Validation()

        # Form fields
        # Instantiate StringVars to hold the data entered in the form fields
        self.staff_id = tk.StringVar()
        self.staff_name = tk.StringVar()
        self.staff_surname = tk.StringVar()
        self.staff_password = tk.StringVar()

        # List of staff ids - lb for listbox
        self.lb_ids = None

        # Messagebox title
        self.mb_title_bar = "Staff CRUD"

        pass
Example #9
def creat_task():
    task_info = request.get_json()
    if "title" in task_info.keys():
        title = task_info.get("title")
        status = "to-do"
        task_id = len(todo_list)+1
        logged_user = get_jwt_identity()

        validate = Validation.validate_task(title)
        if validate:
            return jsonify({"message": validate}), 400
        new_task = Task(task_id=task_id, title=title, owner=logged_user, status=status)
        if(new_task.create_task()):
            return jsonify({"New task Created": new_task.__dict__}), 201
        return jsonify({"message": "Task not added"}), 400
    return jsonify({"message": "a 'key' is missing in your task body"}), 400
Example #10
    def make_move(self, tictacgame):
        if self.option == Level.CUSTOM:
            while True:
                coordinates = input("Enter the coordinates > ")
                if not Validation.check_move_validity(coordinates,
                                                      tictacgame.matrix):
                    continue
                x, y = coordinates.split()
                x = int(x)
                y = int(y)
                tictacgame.make_move(x - 1, y - 1)
                tictacgame.output()
                break
        elif self.option in (Level.EASY, Level.MEDIUM, Level.HARD):
            tictacgame.make_computer_move(self.option, self)
            tictacgame.output()
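
check_move_validity is not shown; a plausible sketch, assuming coordinates is a 1-based "<row> <col>" string and matrix is a square board whose empty cells hold a space or None:

class Validation:
    @staticmethod
    def check_move_validity(coordinates, matrix):
        parts = coordinates.split()
        if len(parts) != 2 or not all(p.isdigit() for p in parts):
            print("Enter two numbers separated by a space")
            return False
        x, y = (int(p) for p in parts)
        if not (1 <= x <= len(matrix) and 1 <= y <= len(matrix)):
            print("Coordinates are out of range")
            return False
        if matrix[x - 1][y - 1] not in (' ', None):
            print("That cell is already taken")
            return False
        return True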
Example #11
    def __init__(self, data):
        SkuBase.__init__(self, data)

        if 'commentList' in self.data.keys():

            comments = json.loads(self.data.pop('commentList'))

            self.data['commentList'] = list()

            for comment in comments:

                comment['commentData'] = unhexlifyUtf8(
                    comment.pop('commentData'))
                if Validation.isCommentBad(comment['commentData']):
                    continue

                self.data['commentList'].append(comment)
Example #12
def generateList(linked_list: Linked_list, lname='l'):
    options = ('🗿 🗿 🗿 🗿 🗿 🗿 🗿 🗿\n' + '🗿 1 - Strategy 1    🗿\n' +
               '🗿 2 - Strategy 2    🗿\n' + '🗿 3 - generate data 🗿\n' +
               '🗿 4 - print list    🗿\n' + '🗿 5 - exit          🗿\n' +
               '🗿 🗿 🗿 🗿 🗿 🗿 🗿 🗿\n')
    context = Context(StrategyIterator)
    while True:
        print(options)
        choice = v.intValidateInRange('Enter choice ', 1, 5)
        if choice == 1: context.strategy = StrategyIterator()
        elif choice == 2: context.strategy = StrategyReadFile()
        elif choice == 3:
            linked_list = generateMenu(linked_list, context, lname)
        elif choice == 4:
            print(linked_list)
        elif choice == 5:
            return linked_list
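
v.intValidateInRange is assumed to keep prompting until the user types an integer inside the given bounds; a minimal standalone sketch of such a helper:

def intValidateInRange(prompt, low, high):
    # Re-prompt until the input parses as an int within [low, high].
    while True:
        raw = input(prompt)
        try:
            value = int(raw)
        except ValueError:
            print("Please enter a whole number")
            continue
        if low <= value <= high:
            return value
        print("Please enter a number between {} and {}".format(low, high))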
Example #13
def register_user():
    reg_info = request.get_json()
    search_keys = ("username", "email", "password")

    if all(key in reg_info.keys() for key in search_keys):
        user_name = reg_info.get("username")
        email    = reg_info.get("email")
        password = reg_info.get("password")
        
        validate = Validation.auth_validation(user_name=user_name, email=email, password=password)
        if validate:
            return jsonify({"message": validate}), 400

        new_user = User(username=user_name, email=email, password=password)
        if (new_user.add_account()):
            return jsonify({"New User Created": new_user.__dict__}), 201
        else:
            return jsonify({"message": "User not added or username already exists"}), 400
    return jsonify({"message": "a 'key(s)' is missing in your registration body"}), 400   
Example #14
    def buy_product(self):
        self.display_product(False)
        max_index = len(Product.list_products())
        while True:
            i = Validation.get_int_input(
                input(
                    "Enter Index of Product You Want To Buy or 0 to cancel : "
                ),
                max_index,
                True,
            )
            if i == 0:
                break
            current_product = Product.list_products()[i - 1]
            if int(current_product.stock_quanitity) > 0:
                current_product.purchase_product(Authentication.user)
                break
            else:
                print("Out Of Stock")
        self.logged_in_menu()
Example #15
def login():
    login_info = request.get_json()
    search_keys = ("username", "password")

    if all(key in login_info.keys() for key in search_keys):
        user_name = login_info.get("username").strip()
        password = login_info.get("password")

        validate = Validation.login_validation(user_name, password)
        if validate:
            return jsonify({"message": validate}), 400
        login = User.login(username=user_name, password=password)
        user_token = {}
        if login:
            access_token = create_access_token(identity= user_name)
            refresh_token = create_refresh_token(identity=user_name)
            user_token["access_token"] = access_token
            user_token["refresh_token"] = refresh_token
            return jsonify(user_token), 200

        return jsonify({"message": "user does not exist, register and login again"}), 404
    return jsonify({"message": "a 'key(s)' is missing in login body"}), 400
Example #16
def add_user():
    request_form = [('alpha', 'first_name', request.form['first_name']),
                    ('alpha', 'last_name', request.form['last_name']),
                    ('email', 'email', request.form['email']),
                    ('pass_check', 'password', request.form['password'],
                     request.form['confirm_password'], 8, 16)]

    sanitize = Validation(request_form)

    if len(sanitize.errors) == 0:
        query = 'INSERT INTO users (first_name, last_name, email, password, created_at, updated_at) VALUES ( :first_name, :last_name, :email, :password, now(), now())'
        data = sanitize.data
        user_id = mysql.query_db(query, data)
        flash("You have successfully registered! You user ID is " +
              str(user_id) + ".")
        if 'data' in session:
            session.pop('data')
        return redirect('/')
    else:
        session['data'] = sanitize.data
        for error in sanitize.errors:
            flash(error)
        return redirect('/')
Example #17
def add_user():
    request_form = [('alpha', 'first_name', request.form['first_name']),
                    ('alpha', 'last_name', request.form['last_name']),
                    ('email', 'email', request.form['email']),
                    ('pass_check', 'password', request.form['password'],
                     request.form['confirm_password'], 8, 16)]

    sanitize = Validation(request_form)

    email_query = 'SELECT id FROM users WHERE email = :email'
    email_exists = mysql.query_db(email_query,
                                  {'email': request.form['email']})

    if len(email_exists) > 0:
        sanitize.errors.append('User exists with email provided.')

    if len(sanitize.errors) == 0:
        data = sanitize.data
        query = 'INSERT INTO users (first_name, last_name, email, password, salt, created_at, updated_at) VALUES ( :first_name, :last_name, :email, :password, :salt, now(), now())'
        user_id = mysql.query_db(query, data)
        flash('You have successfully registered! Your user ID is ' +
              str(user_id) + '.')
        session['user'] = {
            'status': 'active',
            'user_id': user_id,
            'first_name': sanitize.data['first_name'],
            'last_name': sanitize.data['last_name'],
            'email': sanitize.data['email'],
        }
        if 'data' in session:
            session.pop('data')
        return redirect('/')
    else:
        session['data'] = sanitize.data
        for error in sanitize.errors:
            flash(error)
        return redirect('/register')
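
The Validation(request_form) constructor in the two add_user handlers above consumes rule tuples of the form ('alpha', field, value), ('email', field, value) and ('pass_check', field, password, confirm, min_len, max_len), and exposes .errors and .data; a rough sketch of such a class (the rule names follow the tuples shown, everything else is an assumption):

import re

class Validation:
    # Sketch of a rule-tuple validator: each tuple is (rule, field, value, ...).
    def __init__(self, rules):
        self.errors = []
        self.data = {}
        for rule in rules:
            kind, field = rule[0], rule[1]
            if kind == 'alpha':
                value = rule[2]
                if value.isalpha():
                    self.data[field] = value
                else:
                    self.errors.append('{} may only contain letters.'.format(field))
            elif kind == 'email':
                value = rule[2]
                if re.match(r'^[^@\s]+@[^@\s]+\.[^@\s]+$', value):
                    self.data[field] = value
                else:
                    self.errors.append('{} is not a valid email address.'.format(field))
            elif kind == 'pass_check':
                password, confirm, min_len, max_len = rule[2], rule[3], rule[4], rule[5]
                if password != confirm:
                    self.errors.append('Passwords do not match.')
                elif not (min_len <= len(password) <= max_len):
                    self.errors.append('Password must be {}-{} characters.'.format(min_len, max_len))
                else:
                    self.data[field] = password  # a real implementation would hash this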
Example #18
    def train(self,
              x_chunks_dir,
              y_chunks_dir,
              model_name,
              logs_dir,
              mode,
              batch_size,
              model_dir,
              official_perl_rouge=True,
              start_epoch_no=1,
              training_on_collocations=False):
        total_start_time = time.time()
        avg_loss_per_batches_list = [
        ]  # list of avg loss per a number of batches
        avg_loss_per_epoch = []
        rouge1_per_epoch_list = []
        rouge2_per_epoch_list = []
        rougeL_per_epoch_list = []
        best_loss = {
            'loss': 999888,
            'epoch': 0,
            'batch': 0
        }  # a huge initial value of best_loss
        best_rouge1_f1 = {'rouge': 0.0, 'epoch': 0, 'batch': 0}
        best_rouge2_f1 = {'rouge': 0.0, 'epoch': 0, 'batch': 0}
        best_rougeL_f1 = {'rouge': 0.0, 'epoch': 0, 'batch': 0}

        if not os.path.exists(logs_dir):
            os.makedirs(logs_dir)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)

        model_checkpoint_filename = model_name + '_checkpoint.txt'
        now = datetime.datetime.now()
        logfile_name = str(
            now.strftime(model_name + '_%Y%m%d_%H%M_train_logs.txt'))
        logfile_path = logs_dir + logfile_name
        logfile_writer_e = None
        logfile_writer_w = open(logfile_path, 'w', encoding='utf8')
        sess = None
        try:
            print_str = 'Logs\nLogs File: {}\nMode: {}\nModel: {}\nstarting: {}\nParameters:\n{}\n' \
                        'Loading dictionary and training dataset...\n' \
                        ''.format(logfile_name, mode, model_name, now.strftime("%Y-%m-%d %H:%M"), self.parameters_str)
            print(print_str, end='')
            logfile_writer_w.write(print_str)

            word2int_dict, int2word_dict, article_max_len, summary_max_len, vocabulary_size = self.load_training_data(
            )
            summary_max_len += 1  # because of adding </S> or <S>
            x_and_y_file_path_pairs_list = self.file_path_pairs(
                x_chunks_dir=x_chunks_dir, y_chunks_dir=y_chunks_dir)

            num_of_chunks = len(x_and_y_file_path_pairs_list)
            num_of_batches_per_epoch = self.num_of_batches_per_epoch(
                x_and_y_file_path_pairs_list, batch_size)

            print_str = 'Batches per epoch: {}\n' \
                        'vocabulary_size: {}\n' \
                        'Article and summary max_len: {}, {}\n'.format(num_of_batches_per_epoch, vocabulary_size,
                                                                       article_max_len, summary_max_len)
            print(print_str, end='')
            logfile_writer_w.write(print_str)

            sess = tf.Session()  # as sess:

            print("Loading word2vec...")
            word2vec_embeddings = Model.get_init_embedding(
                int2word_dict, param.embedding_dim, paths.word2vec_file_path)
            print_str = 'Word embeddings have been loaded.\n'
            print(print_str, end='')
            logfile_writer_w.write(print_str)

            model = Model(
                article_max_len,
                summary_max_len,
                param.embedding_dim,
                param.hidden_dim,
                param.layers_num,
                param.learning_rate,
                param.beam_width,
                param.keep_prob,
                vocabulary_size,
                batch_size,
                word2vec_embeddings,
                forward_only=False,
                using_word2vec_embeddings=param.using_word2vec_embeddings)

            model_validation = Model(article_max_len,
                                     summary_max_len,
                                     param.embedding_dim,
                                     param.hidden_dim,
                                     param.layers_num,
                                     param.learning_rate,
                                     param.beam_width,
                                     param.keep_prob,
                                     vocabulary_size,
                                     batch_size,
                                     word2vec_embeddings,
                                     forward_only=True,
                                     using_word2vec_embeddings=False)

            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=15)

            if param.train_restored_saved_model and os.path.exists(
                    model_dir + model_checkpoint_filename):
                print('Restoring the saved model.')
                logfile_writer_w.write('Restoring the saved model.\n')
                ckpt = tf.train.get_checkpoint_state(
                    checkpoint_dir=paths.model_dir,
                    latest_filename=model_checkpoint_filename)
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print('Training of new model.')
                logfile_writer_w.write('Training of new model.\n')
            logfile_writer_w.close()
            epoch = start_epoch_no
            epoch_no = start_epoch_no
            end_epoch_no = epoch_no + param.epochs_num - 1
            remaining_epochs = end_epoch_no - start_epoch_no
            for _ in range(param.epochs_num):
                end_epoch_no = epoch + remaining_epochs
                remaining_epochs -= 1
                logfile_writer_e = open(logfile_path, 'a', encoding='utf8')
                epoch_loss_list = []
                epoch_start_time = time.time()
                print_str = '\n{} of {} epoch\n'.format(epoch, end_epoch_no)
                print(print_str, end='')
                logfile_writer_e.write(print_str)

                x_and_y_file_path_pairs_list = shuffle(
                    x_and_y_file_path_pairs_list,
                    random_state=round(time.time() // 12345))
                chunk_index = 0
                for (x_file_path, y_file_path) in x_and_y_file_path_pairs_list:
                    #chunk_start_time = time.time()
                    chunk_index += 1
                    print_str = '\t{} of {} chunk (files: {} & {})\n'.format(
                        chunk_index, num_of_chunks, x_file_path[-60:],
                        y_file_path[-60:])
                    print(print_str, end='')
                    logfile_writer_e.write(print_str)

                    train_x, train_y, _ = self.load_and_shuffle_training_set(
                        x_file_path, y_file_path)
                    # if param.training_size is not None:
                    # remaining_training_size = remaining_training_size - current_training_size

                    print_str = '\t\tx & y chunk shape: {}, {}\n'.format(
                        np.array(train_x).shape,
                        np.array(train_y).shape)
                    logfile_writer_e.write(print_str)
                    print(print_str, end='')

                    batches = self.batch_iter_v2(train_x, train_y, batch_size)
                    num_batches_of_chunk = (len(train_x) - 1) // batch_size + 1

                    print("\t\tBatches of chunk: {}".format(
                        num_batches_of_chunk))
                    # print("Iteration starts.")

                    logfile_writer_e.write("\t\tBatches of chunk: {}\n".format(
                        num_batches_of_chunk))
                    # logfile_writer.write("\nIteration starts.\n")

                    batch_index = 0
                    batches_start_time = time.time()
                    batches_loss_list = []
                    for batch_x, batch_y in batches:
                        batch_index += 1
                        batch_x_len = list(
                            map(lambda x: len([y for y in x if y != 0]),
                                batch_x))
                        batch_decoder_input = list(
                            map(lambda x: [word2int_dict["<S>"]] + list(x),
                                batch_y))  # y starts with <s>
                        batch_decoder_len = list(
                            map(lambda x: len([y for y in x if y != 0]),
                                batch_decoder_input))
                        batch_decoder_output = list(
                            map(lambda x: list(x) + [word2int_dict["</S>"]],
                                batch_y))  # y ends with </s>

                        batch_decoder_input = list(
                            map(
                                lambda d: d + (summary_max_len - len(d)) *
                                [word2int_dict["<PAD>"]], batch_decoder_input))
                        batch_decoder_output = list(
                            map(
                                lambda d: d + (summary_max_len - len(d)
                                               ) * [word2int_dict["<PAD>"]],
                                batch_decoder_output))

                        train_feed_dict = {
                            model.batch_size: len(batch_x),
                            model.X: batch_x,
                            model.X_len: batch_x_len,
                            model.decoder_input: batch_decoder_input,
                            model.decoder_len: batch_decoder_len,
                            model.decoder_target: batch_decoder_output
                        }

                        fetches = [model.update, model.global_step, model.loss]
                        _, batch_no, loss = sess.run(fetches,
                                                     feed_dict=train_feed_dict)
                        epoch_loss_list.append(loss)
                        batches_loss_list.append(loss)
                        if batch_no % param.print_loss_per_steps == 0:
                            avg_batch_loss = np.average(
                                np.array(batches_loss_list))
                            avg_loss_per_batches_list.append(avg_batch_loss)
                            batches_loss_list = []
                            print_str = "\t\tEpoch: {} Batch: {}, Loss: {:.5f}, " \
                                        "Time (batches, epoch & total): {}, {} & {}\n" \
                                        "".format(epoch, batch_no, avg_batch_loss,
                                                  datetime.timedelta(
                                                      seconds=round(time.time() - batches_start_time, 0)),
                                                  datetime.timedelta(
                                                      seconds=round(time.time() - epoch_start_time, 0)),
                                                  datetime.timedelta(
                                                      seconds=round(time.time() - total_start_time, 0)))
                            print(print_str, end='')
                            logfile_writer_e.write(print_str)
                            batches_start_time = time.time()

                        # end of epoch --> saving the best model and print validation values
                        if batch_no % num_of_batches_per_epoch == 0:  # end of epoch
                            epoch = batch_no // num_of_batches_per_epoch
                            temp = np.array(epoch_loss_list)
                            avg_epoch_loss = np.average(temp)
                            del temp
                            avg_loss_per_epoch.append(avg_epoch_loss)
                            file_prefix = '{}_{}epoch_{}batch'.format(
                                model_name, self.int_to_two_digits_str(epoch),
                                batch_no)
                            validation = Validation()
                            rouge_scores_dict = \
                                validation.rouge_scores_of_validation_set(sess,
                                                                          model_validation,
                                                                          int2word_dict,
                                                                          batch_size,
                                                                          file_prefix=file_prefix,
                                                                          official_perl_rouge=official_perl_rouge,
                                                                          #training_on_collocations=training_on_collocations
                                                                          )
                            rouge1_per_epoch_list.append(
                                rouge_scores_dict['rouge_1_f_score'])
                            rouge2_per_epoch_list.append(
                                rouge_scores_dict['rouge_2_f_score'])
                            rougeL_per_epoch_list.append(
                                rouge_scores_dict['rouge_l_f_score'])
                            saving_flag = False

                            if rouge_scores_dict[
                                    'rouge_1_f_score'] > best_rouge1_f1[
                                        'rouge']:
                                best_rouge1_f1['rouge'] = rouge_scores_dict[
                                    'rouge_1_f_score']
                                best_rouge1_f1['epoch'] = epoch
                                best_rouge1_f1['batch'] = batch_no
                                saving_flag = True
                            if rouge_scores_dict[
                                    'rouge_2_f_score'] > best_rouge2_f1[
                                        'rouge']:
                                best_rouge2_f1['rouge'] = rouge_scores_dict[
                                    'rouge_2_f_score']
                                best_rouge2_f1['epoch'] = epoch
                                best_rouge2_f1['batch'] = batch_no
                                saving_flag = True
                            if rouge_scores_dict[
                                    'rouge_l_f_score'] > best_rougeL_f1[
                                        'rouge']:
                                best_rougeL_f1['rouge'] = rouge_scores_dict[
                                    'rouge_l_f_score']
                                best_rougeL_f1['epoch'] = epoch
                                best_rougeL_f1['batch'] = batch_no
                                saving_flag = True
                            if avg_epoch_loss < best_loss['loss']:
                                best_loss['loss'] = avg_epoch_loss
                                best_loss['epoch'] = epoch
                                best_loss['batch'] = batch_no
                                saving_flag = True
                                # Save all variables of the TensorFlow graph to file.

                            print_str = 'Model: {}\n' \
                                '   Avg_epoch_loss  (current & best): {:.5f} & {:.5f} (epoch {}, batch {})\n' \
                                '   Rouge_1_f-score (current & best): {:.5f} & {:.5f} (epoch {}, batch {})\n' \
                                '   Rouge_2_f-score (current & best): {:.5f} & {:.5f} (epoch {}, batch {})\n' \
                                '   Rouge_L_f-score (current & best): {:.5f} & {:.5f} (epoch {}, batch {})\n' \
                                '   Time (epoch & total): {} & {}.\n'.format(model_name,
                                    avg_epoch_loss, best_loss['loss'], best_loss['epoch'], best_loss['batch'],
                                    rouge_scores_dict['rouge_1_f_score'],
                                    best_rouge1_f1['rouge'], best_rouge1_f1['epoch'], best_rouge1_f1['batch'],
                                    rouge_scores_dict['rouge_2_f_score'],
                                    best_rouge2_f1['rouge'], best_rouge2_f1['epoch'], best_rouge2_f1['batch'],
                                    rouge_scores_dict['rouge_l_f_score'],
                                    best_rougeL_f1['rouge'], best_rougeL_f1['epoch'], best_rougeL_f1['batch'],
                                    datetime.timedelta(seconds=round(time.time() - epoch_start_time, 0)),
                                    datetime.timedelta(seconds=round(time.time() - total_start_time, 0)))

                            if saving_flag:
                                current_model_name = model_name + '_' + self.int_to_two_digits_str(
                                    epoch) + 'epoch.ckpt'
                                # model_checkpoint_filename = model_name + 'checkpoint'
                                checkpoint = saver.save(
                                    sess=sess,
                                    save_path=model_dir + current_model_name,
                                    latest_filename=model_checkpoint_filename,
                                    global_step=batch_no)
                                add_str = '\nEpoch {}: The model is saved as the best one (...{}).\n' \
                                          ''.format(epoch, checkpoint[-25:])
                                print(add_str + print_str, end='')
                                logfile_writer_e.write(add_str + print_str)
                            else:
                                add_str = '\nEpoch {}: The model was not saved as it is not the best one.\n' \
                                          ''.format(epoch)
                                print(add_str + print_str, end='')
                                logfile_writer_e.write(add_str + print_str)
                            epoch = epoch + 1
                logfile_writer_e.close()

        except KeyboardInterrupt:
            if logfile_writer_w is not None:
                logfile_writer_w.close()
            if logfile_writer_e is not None:
                logfile_writer_e.close()
            #print_keyboard_interrupt_str = '\nException: KeyboardInterrupt\n'
            logfile_writer = open(logfile_path, 'a', encoding='utf8')
            print_str = '\n' + "-" * 60 + '\nException: KeyboardInterrupt\n'
            print(print_str)
            logfile_writer.write(print_str)
            traceback.print_exc(file=sys.stdout)
            traceback.print_exc(file=logfile_writer)
            print_str = "-" * 60
            print(print_str)
            logfile_writer.write(print_str)
            logfile_writer.close()
        except Exception:
            if logfile_writer_w is not None:
                logfile_writer_w.close()
            if logfile_writer_e is not None:
                logfile_writer_e.close()
            logfile_writer = open(logfile_path, 'a', encoding='utf8')
            print_str = '\n' + "-" * 60 + '\n' + 'Exception:\n'
            print(print_str)
            logfile_writer.write(print_str)
            traceback.print_exc(file=sys.stdout)
            traceback.print_exc(file=logfile_writer)
            print_str = "-" * 60
            print(print_str)
            logfile_writer.write(print_str)
            logfile_writer.close()
        finally:
            self.finally_of_train_method(
                logfile_path, avg_loss_per_batches_list, avg_loss_per_epoch,
                rouge1_per_epoch_list, rouge2_per_epoch_list,
                rougeL_per_epoch_list, total_start_time, now)
            sess.close()
class OptimizedTimezoneProbModel():
    def __init__(self):
        self.training_data_span_months = 15

    def fit(self, training_data):
        self.validation = Validation(training_data)
        self._training_data = training_data
        self.smoother = self.optimize_smoother()


        # get validation set
        # optimize the smoother using validation set
    def optimize_smoother(self, default=np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01])):

        cons = ({'fun': lambda x: x[0], 'type': 'ineq' },
                {'fun': lambda x: x[1], 'type': 'ineq' },
                {'fun': lambda x: x[2], 'type': 'ineq' },
                {'fun': lambda x: x[3], 'type': 'ineq' },
                {'fun': lambda x: x[4], 'type': 'ineq' },
                {'fun': lambda x: x[5], 'type': 'ineq' },
                {'fun': lambda x: 6 - sum(x), 'type': 'ineq' }) # only use low weights

        val = minimize(self.evaluate_training_error,
                default,
                method='COBYLA',
                constraints=cons,
                options={'maxiter': 50, 'disp': True})

        return val.x


    def evaluate_training_error(self, smoother):
        self.smoother = smoother

        errors = []
        for i in range(0,self.num_months()-self.training_data_span_months):
            training_data = self.training_data(i, self.training_data_span_months)
            test_data = self.test_data(i+self.training_data_span_months)
            pred = self.predict(BusinessForecast(test_data).convert(),
                    training_data=training_data)
            actual = ActualSchedule(test_data).bins()

            errors.append(mean_squared_error(pred, actual))

        sum_errors = np.array(errors).sum()
        print "Smoother: {}, error: {}".format(self.smoother, sum_errors)
        return sum_errors

    def all_smoothing_values_positive(self):
        truth = True

        for i in self.smoother:
            truth = truth & (i >= 0)

        return truth


    def predict(self, business_forecast, training_data=pd.DataFrame()):
        sum_bins = np.zeros(6)
        for i in business_forecast:

            sched = ActualSchedule(training_data[\
                    training_data.user_tz == i['user_tz']])
            stuff = (sched.bins() + self.smoother) / (sched.bins() + self.smoother).sum()
            sum_bins +=  stuff * i['frequency'] * i['schedule_type']

        return sum_bins

    #private
    def size(self):
        return self.validation.size()

    def num_months(self):
        return self.validation.num_months()

    def training_data(self, *args):
        return self.validation.training_data(*args)

    def year_month_index(self, *args):
        return self.validation.year_month_index(*args)

    def test_data(self, *args):
        return self.validation.test_data(*args)
Example #20
def plot():
    validation = Validation()
    validation.plot_ea()
    validation.plot_hc()
Example #21
# standard imports
from flask import Flask, request, render_template, session, redirect, url_for
from env import secret_key
from database import SqlOperations
from validation import Validation
from config import Config

# variable configurations
app = Flask(__name__)
app.secret_key = secret_key
config = Config()
sql = SqlOperations(config)
val = Validation(config)


@app.route("/")
def root():
    for key in list(session.keys()):
        session.pop(key)
    return render_template("index.html")


@app.route("/routes", methods=["POST", "GET"])
def routes():
    """
    /routes handles input to search for bus services between two bus stops, outputs the result and
    redirects to /list_stops
    """
    # list of error messages
    err_msgs = [None] * 5
    # list of headers to be displayed
Example #22
# The repos.json file should be readable by the user running the Flask app,
# and the absolute path should be given by this environment variable.
BASE_APP_PATH = os.path.dirname(os.path.realpath(__file__))
repos_config = json.loads(
    io.open(BASE_APP_PATH + '/config/repos.json', 'r').read())
global_config = json.loads(
    io.open(BASE_APP_PATH + '/config/global.json', 'r').read())
logging_config = json.loads(
    io.open(BASE_APP_PATH + '/config/logging.json', 'r').read())

if 'use_proxyfix' in global_config and global_config['use_proxyfix']:
    from werkzeug.contrib.fixers import ProxyFix

dictConfig(logging_config)
app = Flask(__name__)
validation = Validation(global_config)


@app.route("/", methods=['GET', 'POST'])
def index():
    if not validation.validate_ip():
        abort(403)
    if request.method == 'GET':
        return 'OK'
    elif request.method == 'POST':
        event = request.headers.get('X-GitHub-Event')
        payload = json.loads(
            request.data.decode('utf8')) if request.data else None
        handler = GithubEventHandler(app, event, payload)
        handler.set_config(global_config, repos_config)
        repo_config = handler.get_repo_config()
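
validation.validate_ip is not shown; for a GitHub webhook receiver it typically means checking the caller's address against GitHub's published hook ranges. A sketch using the standard ipaddress module (the 'github_hook_cidrs' config key and the hard-coded fallback range are assumptions; the real ranges come from GitHub's /meta API):

import ipaddress
from flask import request

class Validation:
    def __init__(self, global_config):
        # Fall back to a commonly published GitHub hook range if none is configured.
        self.allowed_networks = [
            ipaddress.ip_network(cidr)
            for cidr in global_config.get('github_hook_cidrs', ['192.30.252.0/22'])
        ]

    def validate_ip(self):
        caller = ipaddress.ip_address(request.remote_addr)
        return any(caller in net for net in self.allowed_networks)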
Example #23
def train(param, model, forest_net, dataset):

    # Launch the graph
    with tf.Session() as sess:
        sess.run(model.init)
        step = 0
        redisual = list()
        # Keep training until reach max iterations
        sum_time = []
        while step < param.training_iters:
            s_time = float(time.time())
            #generate train data---mini batch
            #using the forest net generate hidden data : n_steps * batch_size * n_input
            batch_x, batch_y = generate_lstm_input(param, forest_net, dataset,
                                                   redisual)
            #batch_x = forest_net.get_train_data(mnist.train.images, redisual, mnist.test.images)

            m_time = float(time.time())
            sum_time.append(m_time - s_time)
            batch_x = np.transpose(batch_x, [1, 0, 2])
            #batch_y = np.reshape(batch_y,(-1,param.n_classes))

            fd = {model.xx: batch_x, model.y: batch_y}

            # Run optimization op (backprop)
            sess.run(model.optimizer, feed_dict=fd)
            #get the gradient or residual
            m1_time = float(time.time())
            grad_val = np.array(
                sess.run(tf.gradients(model.cost, model.xx), feed_dict=fd)[0])
            m2_time = float(time.time())

            redisual = param.step * np.transpose(grad_val, [1, 0, 2])
            e_time = float(time.time())
            #print (m_time-s_time, m2_time-m1_time, e_time-m2_time)
            if step % param.display_step == 0:
                # Calculate batch accuracy
                acc = sess.run(model.accuracy, feed_dict=fd)
                # Calculate batch loss
                loss = sess.run(model.cost, feed_dict=fd)
                print("Iter " + str(step) + ", Minibatch Loss= " + \
                      "{:.6f}".format(loss) + ", Training Accuracy= " + \
                      "{:.5f}".format(acc))
            step += 1
            batch_x = list()
            batch_y = list()
        print(sum(sum_time) / len(sum_time))
        print("Optimization Finished!")

        #test data

        test_label = dataset.y_test  #[:test_len]
        #test_data_temp = forest_net.forest_test_predict(test_d)
        test_data_temp = np.array(forest_net.get_test_data())
        #print (test_data_temp.shape)
        test_data = sess.run(tf.transpose(test_data_temp, [1, 0, 2]))

        fd = {model.xx: test_data, model.y: test_label}
        loss = sess.run(model.cost, feed_dict=fd)

        print("{:.6f}".format(loss))
        print("Testing Accuracy:", \
            sess.run(model.accuracy, feed_dict=fd))

        #get the predict classifier and probability value
        classifier_value = sess.run([model.classifier_collection],
                                    feed_dict={model.xx: test_data})
        proba_value = sess.run([model.proba_collection],
                               feed_dict={model.xx: test_data})

        #print (np.array(classifier_value).shape, np.array(proba_value).shape)

        v = Validation()
        result = [str(item + 1) for item in classifier_value[0]]
        v.calculateF1(dataset.y_input, result)
Example #24
    def destroy(self):
        request = Request(self)
        url = self.base_url() + '/' + self.id
        response = request.destroy(url)
        return Validation(self).validate_response(response)
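
validate_response is not shown; a minimal sketch assuming a requests-style response object with status_code and text, and that failures are recorded on the wrapped resource:

class Validation:
    def __init__(self, resource):
        self.resource = resource

    def validate_response(self, response):
        # Treat any 2xx status as success; record the body as an error otherwise.
        if 200 <= response.status_code < 300:
            return True
        self.resource.errors = response.text
        return False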
Example #25
class Receptionist:
    """
    Receptionist class used to represent the reception functionality of user registration and login.

    Objects:
        __db :
            Protected UserDB instance.
        __secure :
            Protected Security instance.
        __valid :
            Protected Validation instance.

    Methods
    -------
    register
        Lets a new user register an account with a valid user name, password, proper name, and email.

    login
        Lets an existing user log in to the system using a valid user id and password.

    
    """
    __db = UserDB()
    __secure = Security()
    __valid = Validation()

    def register(self):
        """
        This method asks the user for a username and password, and asks them to confirm the password.
        If the username already exists in the system, the user is asked to choose another one.
        The password is then compared against the confirmation password; if the two do not match,
        a "password does not match" message is shown.
        The method then prompts for the first name, last name and email address, and a user record is
        created via the UserDB class with all of the entered data as parameters.

        The database itself is also created and the data inserted through the UserDB methods within this method.

        """
        while True:
            username = input("Insert your UserName Please: ")

            if not self.__db.isExist(username) and self.__valid.userName_regex.match(username):
                username = username.lower()
                break
            print("Sorry, that username already exists.")
            print("Please choose another username")
        while True:
            password = getpass.getpass("Insert your Password Please: ")
            confirmPassword = getpass.getpass("Confirm your Password Please: ")
            if password == confirmPassword:
                break
            print("password does not match")
        while True:
            firstname = str(input("Insert your First Name Please: "))
            lastname = str(input("Insert your Last Name Please: "))
            if self.__valid.name_regex.match(
                    firstname) and self.__valid.name_regex.match(lastname):
                break
            print("You should enter letters only")
        while True:
            email = input("Insert Your Email Please: ")
            if self.__valid.email_regex.match(email):
                break
            print("you should enter a vaild email address")
        self.__db.insert(username, password, firstname, lastname, email)
        self.__db.displayDB()

    def login(self):
        """
        This method asks the user for a username and password and checks whether they exist in the database;
        if a matching record is found, the user is logged in to the system.
        """
        count = 0
        while True:
            self.__db = UserDB()
            while True:
                userName = input("Insert Your UserName: ")
                password = getpass.getpass("Insert Your Password: ")
                if self.__db.isExist(userName):
                    break
                print(
                    "You are not registered; you should register by choosing 1 from the menu"
                )
                return False
            count += 1
            result = self.__db.get_Pass(userName)
            if count == 4:
                return False
            else:
                if result is not None:
                    result = result[0]
                    plain_pass = self.__secure.decrypt(result, userName)

                    if bytes.decode(plain_pass) == password:
                        data = self.__db.getUserInformation(userName)
                        return data
                    else:
                        print("Username or password is wrong")
                else:
                    print("It is wrong you can try again")
Example #26
class Inflow:
    def __init__(self):
        self.data_loader = DataLoader(cfg)
        self.__build_model()

        self.log_config('config', cfg, self.train_summary_writer, 0)

        self.validation = Validation(cfg.validation, self.data_loader,
                                     self.valid_summary_writer, self.network,
                                     'Valid')

    def log_config(self, name, config, summary_writer, step):
        general_keys = list(config.keys())
        rows = []
        for key in general_keys:
            try:
                subkeys = list(config[key])
                for subkey in subkeys:
                    rows.append(
                        ['%s.%s' % (key, subkey),
                         str(config[key][subkey])])
            except Exception:
                rows.append([key, str(config[key])])

        hyperparameters = [tf.convert_to_tensor(row) for row in rows]
        with summary_writer.as_default():
            tf.summary.text(name, tf.stack(hyperparameters), step=step)

        return

    def __build_model(self):
        self.network = TabNet(self.data_loader.get_columns(),
                              self.data_loader.num_features,
                              feature_dim=cfg.train.feature_dim,
                              output_dim=cfg.train.output_dim,
                              num_decision_steps=cfg.train.num_decision_steps,
                              relaxation_factor=cfg.train.relaxation_factor,
                              batch_momentum=cfg.train.batch_momentum,
                              virtual_batch_size=cfg.train.virtual_batch_size,
                              num_classes=cfg.train.num_classes,
                              encoder_type=cfg.train.encoder_type,
                              epsilon=0.00001)

        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            cfg.train.learning_rate,
            decay_steps=cfg.train.learning_rate_decay_steps,
            decay_rate=cfg.train.learning_rate_decay_factor,
            staircase=True)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
        self.log_loss = tf.losses.BinaryCrossentropy()
        self.loss_metric = tf.keras.metrics.Mean(name='train_loss')
        self.AUC_metric = tf.keras.metrics.AUC(name='train_AUC')
        self.binary_metric = tf.keras.metrics.BinaryAccuracy(
            name='train_BinaryAccuracy')

        train_log_dir = os.path.join(cfg.train.logs_base_dir, 'train')
        valid_log_dir = os.path.join(cfg.train.logs_base_dir, 'valid')
        self.train_summary_writer = tf.summary.create_file_writer(
            train_log_dir)
        self.valid_summary_writer = tf.summary.create_file_writer(
            valid_log_dir)
        return

    def __reset_all_metrics(self):
        self.loss_metric.reset_states()
        self.AUC_metric.reset_states()
        self.binary_metric.reset_states()

        return

    def __load_variables(self):
        if cfg.train.restore_model_path != '':
            print(cfg.train.restore_model_path)
            output = self.network(dict(
                self.data_loader.valid_X.take([0, 1], axis=0)),
                                  training=False)

            file = h5py.File(cfg.train.restore_model_path, 'r')
            weights = []
            for i in range(len(file.keys())):
                weights.append(file['weight' + str(i)].value)
            self.network.set_weights(weights)
            file.close()

            #             file = h5py.File(cfg.train.restore_model_path.replace('model-', 'optimizer-'), 'r')
            #             weights = []
            #             for i in range(len(file.keys())):
            #                 weights.append(file['weight' + str(i)].value)
            #             self.optimizer.set_weights(weights)
            #             file.close()

            step = int(
                cfg.train.restore_model_path.split('.h5')[0].split('-')[-1])
            self.optimizer.iterations.assign(step)

            print('Model restored')
        else:
            step = 0

        return step

    def __save_model(self, step):
        if not os.path.exists(cfg.train.models_base_dir):
            os.makedirs(cfg.train.models_base_dir)

        file_path = os.path.join(cfg.train.models_base_dir,
                                 'model-%d.h5' % step)
        file = h5py.File(file_path, 'w')
        weights = self.network.get_weights()
        for i in range(len(weights)):
            file.create_dataset('weight' + str(i), data=weights[i])
        file.close()

        file_path = os.path.join(cfg.train.models_base_dir,
                                 'optimizer-%d.h5' % step)
        file = h5py.File(file_path, 'w')
        weights = self.optimizer.get_weights()
        for i in range(len(weights)):
            file.create_dataset('weight' + str(i), data=weights[i])
        file.close()

        print('Model saved')

        return

    def run_train(self):
        step = self.__load_variables()

        self.validation.run_validation(step)

        for epoch in range(cfg.train.max_nrof_epochs):
            print('Start epoch %d' % epoch)

            self.__reset_all_metrics()

            batch_id = 0
            for batch_features, labels in tqdm(self.data_loader.train_loader):
                with tf.GradientTape() as tape:
                    output, output_aggregated, total_entropy,\
                    aggregated_mask_values_all, mask_values_all = self.network(batch_features, training=True)

                    loss = self.log_loss(labels, output)
                    # reg_loss = sum(self.network.losses)
                    reg_loss = cfg.train.weight_decay * tf.add_n([
                        tf.nn.l2_loss(w)
                        for w in self.network.trainable_variables
                    ])
                    total_loss = loss + cfg.train.sparsity_loss_weight * total_entropy + reg_loss

                grads = tape.gradient(total_loss,
                                      self.network.trainable_variables)
                capped_gvs = [
                    tf.clip_by_value(grad, -cfg.train.gradient_thresh,
                                     cfg.train.gradient_thresh)
                    for grad in grads
                ]

                self.optimizer.apply_gradients(
                    zip(capped_gvs, self.network.trainable_variables))

                self.loss_metric(total_loss)
                self.AUC_metric(labels, output)
                self.binary_metric(labels, output)

                with self.train_summary_writer.as_default():
                    # Visualization of the feature selection mask at decision step ni
                    for ni in range(len(mask_values_all)):
                        tf.summary.image("Mask for step" + str(ni),
                                         tf.expand_dims(
                                             tf.expand_dims(
                                                 mask_values_all[ni], 0), 3),
                                         max_outputs=1,
                                         step=step)
                    # Visualization of the aggregated feature importances
                    for ni in range(len(aggregated_mask_values_all)):
                        tf.summary.image(
                            "Aggregated mask",
                            tf.expand_dims(
                                tf.expand_dims(aggregated_mask_values_all[ni],
                                               0), 3),
                            max_outputs=1,
                            step=step)

                    tf.summary.scalar('Total_loss',
                                      self.loss_metric.result(),
                                      step=step)
                    tf.summary.scalar('Total entropy',
                                      total_entropy,
                                      step=step)
                    tf.summary.scalar('Log_Loss', loss, step=step)
                    tf.summary.scalar('Reg_Loss', reg_loss, step=step)
                    tf.summary.scalar('AUC_score',
                                      self.AUC_metric.result(),
                                      step=step)
                    tf.summary.scalar('Accuracy',
                                      self.binary_metric.result(),
                                      step=step)
                    tf.summary.scalar('Gini_index',
                                      2 * self.AUC_metric.result() - 1,
                                      step=step)
                    tf.summary.scalar(
                        'Learning_rate',
                        self.optimizer.learning_rate.__call__(step).numpy(),
                        step=step)

                template = 'Epoch [{}][{}/{}], loss: {}, binary accuracy: {}, AUC: {}'
                print(
                    template.format(epoch, batch_id,
                                    self.data_loader.nrof_batches, loss,
                                    self.binary_metric.result() * 100,
                                    self.AUC_metric.result() * 100))

                batch_id += 1
                step += 1

            self.__save_model(step)

            self.validation.run_validation(step)

            self.data_loader.shuffle_data()

        return
Example #27
def output_variables(unsafe_building_type):
    validate = Validation()
    validate.unsafe_data['building_type'] = unsafe_building_type
    building_type = validate._clean_building_type()
    return _output_variables(building_type)
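
_clean_building_type presumably whitelists the untrusted value; a small sketch under that assumption (the allowed values are illustrative only):

class Validation:
    ALLOWED_BUILDING_TYPES = {'house', 'apartment', 'office'}  # assumed whitelist

    def __init__(self):
        self.unsafe_data = {}

    def _clean_building_type(self):
        value = str(self.unsafe_data.get('building_type', '')).strip().lower()
        return value if value in self.ALLOWED_BUILDING_TYPES else None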
Example #28
def run(run = False):  
    if run:
        val = Validation({})
        val.loadPredictors()
    else:
        val = build()
    val.testPredictors()
    val.evalPredictors()
    val.mergeAllConfusion()
    val.printPredictorsPerformance()
class InterpolatedProbModel(Model):
    def __init__(self):
        self.training_data_span_months = 15
        self.x_offset = 139.016
        self.x_divisor = 20
        self.max_iter = 50
        self.should_optimize = True

    def fit(self, training_data):
        # self.training_data = training_data
        self.validation = Validation(training_data)
        self._training_data = training_data

        # Want to go for minimum mean training error * std.dev

        if self.should_optimize:
            optimized_args = self.optimize_interpolator()
            self.x_offset = optimized_args[0]
            self.x_divisor = optimized_args[1]

        # get validation set
        # first is x_offset, then x_divisor
    def optimize_interpolator(self):

        cons = ({'fun': lambda x: x[0], 'type': 'ineq' },
                {'fun': lambda x: x[1] - 1, 'type': 'ineq' }) # only use low weights

        val = minimize(self.evaluate_training_error,
                [self.x_offset, self.x_divisor],
                method='COBYLA',
                constraints=cons,
                options={'maxiter': self.max_iter, 'disp': True})

        return val.x


    def evaluate_training_error(self, args):
        self.x_offset = args[0]
        self.x_divisor = args[1]

        minimizables = []

        for i in range(0,self.num_months()-self.training_data_span_months):
            training_data = self.training_data(i, self.training_data_span_months)
            test_data = self.test_data(i+self.training_data_span_months)
            pred = self.predict(BusinessForecast(test_data).convert(),
                    training_data=training_data)
            actual = ActualSchedule(test_data).bins()

            to_minimize = mean_absolute_error(pred,actual) * (pred - actual).std()
            minimizables.append(to_minimize)


        np_to_minimize = np.array(minimizables)
        avg = np_to_minimize.mean()
        print "x_offset: {}, x_divisor: {}, minimize: {}".format(self.x_offset,
                self.x_divisor,
                avg)

        return avg


    def predict(self, business_forecast, training_data=pd.DataFrame()):
        sum_bins = np.zeros(6)

        for i in business_forecast:
            gpm = GeneralProbModel()
            gpm.fit(training_data)
            gpm_prediction = gpm.predict([i])

            tpm = TimezoneProbModel()
            tpm.fit(training_data)
            tpm_prediction = tpm.predict([i], training_data=training_data)

            people_at_user_tz = training_data[training_data.user_tz == i['user_tz']]
            tmp = self.timezone_model_probability(people_at_user_tz.schedule_type.sum())

            sum_bins += gpm_prediction * (1 - tmp) + tpm_prediction * tmp

        return sum_bins

    #private
    def timezone_model_probability(self, sample_size):
        stuff = (sample_size - self.x_offset) / self.x_divisor

        print "sample_size: {}, x_offset: {}, x_divisor: {}, stuff: {}".format(sample_size,
                self.x_offset, self.x_divisor, stuff)

        # prevent overflow error
        if stuff < -549:
            exponent = -549
        else:
            exponent = stuff

        return 1.0 / (1 + math.exp(-exponent))

    def size(self):
        return self.validation.size()

    def num_months(self):
        return self.validation.num_months()

    def training_data(self, *args):
        return self.validation.training_data(*args)

    def year_month_index(self, *args):
        return self.validation.year_month_index(*args)

    def test_data(self, *args):
        return self.validation.test_data(*args)
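The blend in predict() follows directly from timezone_model_probability(): a logistic weight that shifts from the general model towards the timezone model as the per-timezone sample grows. A standalone sketch of that weighting, using the default x_offset and x_divisor from __init__ (the sample sizes below are illustrative only):

import math

def timezone_weight(sample_size, x_offset=139.016, x_divisor=20):
    # Logistic weight in [0, 1]; larger samples push the weight towards 1.
    exponent = max((sample_size - x_offset) / x_divisor, -549)  # avoid math.exp overflow
    return 1.0 / (1 + math.exp(-exponent))

for n in (0, 50, 139, 250):
    print "sample_size: {}, weight: {:.3f}".format(n, timezone_weight(n))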
Ejemplo n.º 30
0
import csv
import pickle
import sys

import readValidate
from validation import Validation

try:
    f = open('../preprocessed_data/probabilities.txt', 'rb')
    h = open('../preprocessed_data/hops.txt', 'rb')
except IOError:
    print "No probabilities to validate"
    sys.exit(1)
else:
    prob = pickle.load(f)
    hops = pickle.load(h)
    f.close()
    h.close()

shouldBeResult = readValidate.read("../data/validate.csv")

results = Validation()
results.validation(shouldBeResult,prob,hops)
Ejemplo n.º 31
0
        step += 1
        start = start + batch_size
        end = end + batch_size
    print("Optimization Finished!")

    # Evaluate on the full test set
    #test_len = 128
    #test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    #test_label = mnist.test.labels[:test_len]
    test_data = x_test.reshape((-1, n_steps, n_input))
    test_label = np.reshape(y_test, (-1, 2))

    loss = sess.run(cost, feed_dict={x: test_data, y: test_label})

    print("{:.6f}".format(loss))
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))

    classifier_value = sess.run([classifier_collection],
                                feed_dict={x: test_data})
    print(len(classifier_value))
    for value in classifier_value:
        print(value)
    proba_value = sess.run([proba_collection], feed_dict={x: test_data})
    for value in proba_value:
        print(value)

    v = Validation()
    v.calculateF1(input_value, classifier_value[0])
    v.allValidation(input_value, proba_value[0][:, 1])
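The bodies of calculateF1 and allValidation are not part of this excerpt; a rough, assumed equivalent using scikit-learn (F1 on the hard predictions, ROC AUC on the positive-class probabilities; the averaging choice is an assumption) would be:

from sklearn.metrics import f1_score, roc_auc_score

def calculate_f1(y_true, y_pred):
    # Macro-averaged F1 over the predicted class labels.
    return f1_score(y_true, y_pred, average='macro')

def all_validation(y_true, y_proba):
    # ROC AUC computed from the predicted probability of the positive class.
    return roc_auc_score(y_true, y_proba)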
Ejemplo n.º 32
0
from validation import Validation

validation = Validation()
validation.plot_aco()
Ejemplo n.º 33
0
 def __init__(self, required: bool = False, nullable: bool = True):
     super().__init__(required, nullable,
                      validation=Validation(lambda name, value: Validation.validate_gender(name, value)))
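Here Validation wraps a callable check. A minimal sketch of such a wrapper and of a validate_gender check (the accepted encoding below is an assumption, not the original class):

class Validation:
    def __init__(self, check):
        # check is a callable taking (name, value) and raising on invalid input.
        self._check = check

    def __call__(self, name, value):
        return self._check(name, value)

    @staticmethod
    def validate_gender(name, value):
        if value not in (0, 1, 2):  # assumed encoding: 0 unknown, 1 male, 2 female
            raise ValueError('{} must be 0, 1 or 2, got {!r}'.format(name, value))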
Ejemplo n.º 34
0
                "No such file exists in the specified path...Please see the 'dir' option under savingAndLoading in the config and ensure that your file is present there"
            )
        lastEpoch = getLastEpochFromFileName(fileName)
        startEpoch = lastEpoch + 1
        stage = getStageFromFileName(fileName)
        if stageChangeRequired(stage, lastEpoch):
            changeStage = True
            startEpoch = 1
            stage += 1
        lipreader = Lipreader(stage)
        lipreader = loadModel(lipreader, fileName, changeStage)
    else:
        lipreader = Lipreader()
    if config.gpuAvailable:
        lipreader = lipreader.cuda()

    if config.deviceCount > 1:
        lipreader = nn.DataParallel(lipreader)
    trainer = Trainer(lipreader)
    validator = Validation(lipreader)
    totalEpochs = config.training["Stage " + str(stage)]["epochs"]
    print("Started training at", datetime.now())
    with tqdm(total=totalEpochs - startEpoch + 1, desc="Epochs",
              position=0) as t:
        for epoch in range(startEpoch - 1, totalEpochs):
            trainer.train(epoch)
            validator.validate(epoch)
            t.update()
            saveModel(lipreader, epoch + 1)
        print(f"Successfully completed training of Stage {stage}")
class BackTest():
    def __init__(self,
            data=None,
            model=None,
            training_data_span_months=1):
        self._data_df = data
        self._model = model
        self.training_data_span_months = training_data_span_months
        self.validation = Validation(data)

    def errors(self):
        indices = [] # index of a row, formatted YYYY-M (e.g. 2016-8, 2016-9)
        predictions = []
        errors = [] # errors. Lower is better
        test_data_size = []
        actuals = []

        for i in range(0,self.num_months()-self.training_data_span_months):
            m = self._model()
            training_data = self.training_data(i, self.training_data_span_months)
            m.fit(training_data)

            td = self.test_data(i+self.training_data_span_months)

            print "Generating prediction..."
            prediction = m.predict(BusinessForecast(td).convert(),
                    training_data=training_data)

            actual = ActualSchedule(td).bins()

            print "prediction "
            print prediction
            print "\n"

            print "actual"
            print actual
            print "\n"

            error = mean_absolute_error(actual, prediction)
            errors.append(error)

            index = self.year_month_index(i+self.training_data_span_months)
            indices.append(index)
            print "time: {}, error: {}".format(index, error)

            predictions.append(prediction)
            actuals.append(actual)
            test_data_size.append(actual.sum())

        return pd.DataFrame({
            'errors': errors,
            'test_data_size': test_data_size,
            'predictions': predictions,
            'actuals': actuals
            }).set_index([indices])

    #private
    def size(self):
        return self.validation.size()

    def num_months(self):
        return self.validation.num_months()

    def training_data(self, *args):
        return self.validation.training_data(*args)

    def year_month_index(self, *args):
        return self.validation.year_month_index(*args)

    def test_data(self, *args):
        return self.validation.test_data(*args)
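A usage sketch for BackTest, assuming df is a prepared DataFrame with the columns the models expect (the variable names here are illustrative):

bt = BackTest(data=df,
        model=InterpolatedProbModel,
        training_data_span_months=12)
errors_df = bt.errors()
print errors_df[['errors', 'test_data_size']]
print "mean error: {}".format(errors_df['errors'].mean())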
Ejemplo n.º 36
0
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        #print(sess.run(tf.gradients(cost, x), feed_dict={x:batch_x, y:batch_y})[0])
        step += 1
    print("Optimization Finished!")

    # Evaluate on the full test set

    #test_data = x_test.reshape((-1, n_steps, n_input))
    #test_data = np.transpose(x_test, [1,0,2])
    test_data = x_test
    test_label = y_test
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label}))

    classifier_value = sess.run([classifier_collection],
                                feed_dict={x: test_data})[0]
    proba_value = sess.run([proba_collection], feed_dict={x: test_data})
    v = Validation()

    v.calculateF1(y_input, classifier_value + 1)
    v.allValidation(y_input - 1, proba_value[0][:, 1])
    #v.top_accuacy(y_input, classifier_value[0], proba_value[0][:,0],[1000,10000,50000])
 def __enter_file_name():
     file_name = v.enterStr('Enter file name:  ')
     return file_name
Ejemplo n.º 38
0
class SalespersonGUI():
    """
    GUI class to perform CRUD operations on the Salesperson table in the database.
    """
    def __init__(self):
        """
        Initialise Salesperson class.
        """

        # Data access object
        self.salesperson_dao = SalespersonDAO()

        # Validation object
        self.validator = Validation()

        # Form fields
        self.salesperson_id = tk.StringVar()
        self.title = tk.StringVar()
        self.firstname = tk.StringVar()
        self.surname = tk.StringVar()
        self.position = tk.StringVar()
        self.work_phone = tk.StringVar()
        self.email = tk.StringVar()

        # List of salesperson ids
        self.lb_ids = None

        # Messagebox title
        self.mb_title_bar = "Salesperson CRUD"

        pass

    def create_gui(self, root):
        """
        Create the GUI.

        This is the interface for the user to input data or query the database.
        It is composed of frames containing widgets (e.g. labels, input fields).

        Parameters (apart from self):
            root: main window of application

        Return: 
            salesperson_frame: the frame containing all the widgets for the 
                salesperson CRUD 
        """

        print("Creating Salesperson GUI ...")

        salesperson_frame = tk.Frame(root)
        salesperson_frame.pack()

        # FORM FRAME
        form_frame = tk.Frame(salesperson_frame)
        form_frame.pack()

        # FORM FRAME: ROW 0
        # Heading for Salesperson frame
        tk.Label(form_frame, text="Salesperson",
                 font=('arial', 10)).grid(row=0, column=0, columnspan=3)

        # FORM FRAME: ROW 1
        # Label and entry field (disabled) for "Salesperson ID"
        tk.Label(form_frame,
                 text="Salesperson ID",
                 font=('arial', 10),
                 width=20,
                 anchor='e',
                 bd=1,
                 pady=10,
                 padx=10).grid(row=1, column=0)

        tk.Entry(form_frame,
                 textvariable=self.salesperson_id,
                 width=30,
                 bd=1,
                 state=tk.DISABLED).grid(row=1, column=1)

        # Heading for Listbox to display and select Salesperson IDs
        tk.Label(form_frame, text="Salesperson IDs",
                 font=('arial', 10)).grid(row=1, column=2)

        # FORM FRAME: ROW 2
        # Label and combobox field for "Title"
        tk.Label(form_frame,
                 text="Title",
                 font=('arial', 10),
                 width=20,
                 anchor='e',
                 bd=1,
                 pady=10,
                 padx=10).grid(row=2, column=0)

        TITLE_VALUES = ("Mr", "Mrs", "Ms", "Miss", "Dr")

        ttk.Combobox(form_frame,
                     state='readonly',
                     textvariable=self.title,
                     values=TITLE_VALUES,
                     width=5).grid(row=2, column=1, sticky='w')

        # Listbox to display and select Salesperson IDs
        self.lb_ids = tk.Listbox(form_frame)
        self.lb_ids.grid(row=2, column=2, rowspan=6)
        self.lb_ids.bind('<<ListboxSelect>>', self.on_list_select)

        # FORM FRAME: ROW 3
        # Label and entry field for "First Name"
        tk.Label(form_frame,
                 text="First Name",
                 font=('arial', 10),
                 width=20,
                 anchor='e',
                 bd=1,
                 pady=10,
                 padx=10).grid(row=3, column=0)

        tk.Entry(form_frame, textvariable=self.firstname, width=30,
                 bd=1).grid(row=3, column=1)

        # FORM FRAME: ROW 4
        # Label and entry field for "Surname"
        tk.Label(form_frame,
                 text="Surname",
                 font=('arial', 10),
                 width=20,
                 anchor='e',
                 bd=1,
                 pady=10,
                 padx=10).grid(row=4, column=0)

        tk.Entry(form_frame, textvariable=self.surname, width=30,
                 bd=1).grid(row=4, column=1)

        # FORM FRAME: ROW 5
        # Label and entry field for "Position"
        tk.Label(form_frame,
                 text="Position",
                 font=('arial', 10),
                 width=20,
                 anchor='e',
                 bd=1,
                 pady=10,
                 padx=10).grid(row=5, column=0)

        POSITION_VALUES = ("Junior", "Associate", "Senior")

        ttk.Combobox(form_frame,
                     state='readonly',
                     textvariable=self.position,
                     values=POSITION_VALUES,
                     width=10).grid(row=5, column=1, sticky='w')

        # FORM FRAME: ROW 6
        # Label and entry field for "Work Phone"
        tk.Label(form_frame,
                 text="Work Phone",
                 font=('arial', 10),
                 width=20,
                 anchor='e',
                 bd=1,
                 pady=10,
                 padx=10).grid(row=6, column=0)

        tk.Entry(form_frame, textvariable=self.work_phone, width=30,
                 bd=1).grid(row=6, column=1)

        # FORM FRAME: ROW 7
        # Label and entry field for "Email"
        tk.Label(form_frame,
                 text="E-mail",
                 font=('arial', 10),
                 width=20,
                 anchor='e',
                 bd=1,
                 pady=10,
                 padx=10).grid(row=7, column=0)

        tk.Entry(form_frame, textvariable=self.email, width=30,
                 bd=1).grid(row=7, column=1)

        # BUTTON FRAME
        button_frame = tk.Frame(salesperson_frame, pady=10)
        button_frame.pack()

        tk.Button(button_frame,
                  width=10,
                  text="Clear",
                  command=self.clear_fields).pack(side=tk.LEFT)

        tk.Button(button_frame, width=10, text="Save",
                  command=self.save).pack(side=tk.LEFT)

        tk.Button(button_frame, width=10, text="Delete",
                  command=self.delete).pack(side=tk.LEFT)

        tk.Button(button_frame, width=10, text="Load",
                  command=self.load).pack(side=tk.LEFT)

        return salesperson_frame

    def clear_fields(self):
        """
        Clear the fields of the form

        Parameters (apart from self): None

        Return: None
        """
        self.salesperson_id.set("")
        self.title.set("")
        self.firstname.set("")
        self.surname.set("")
        self.position.set("")
        self.work_phone.set("")
        self.email.set("")
        pass

    def save(self):
        """
        Save the data displayed on the form to the database.

        Get salesperson data to be saved from the global instance attributes,
        then validate data by calling validate_fields()
        If the data is invalid, a message box is presented to the user.
        If the data is valid, the data is either saved or updated
        If salesperson_id is present, the data is updated
        If not, a new salesperson record is created in the database
 
        Parameters (apart from self): None
 
        Return: None
            
        """
        print("Saving a salesperson ...")

        data = self.get_fields()

        valid_data, message = self.validate_fields(data)
        if valid_data:
            if (len(data['salesperson_id']) == 0):
                print("Calling create() as salesperson_id is absent")
                self.create(data)
            else:
                print("Calling update() as salesperson_id is present")
                self.update(data)
                pass
        else:
            message_text = "Invalid fields.\n" + message
            messagebox.showwarning(self.mb_title_bar,
                                   message_text,
                                   icon='warning')
            pass

    def get_fields(self):
        sp = {}

        sp['salesperson_id'] = self.salesperson_id.get()
        sp['title'] = self.title.get()
        sp['firstname'] = self.firstname.get()
        sp['surname'] = self.surname.get()
        sp['position'] = self.position.get()
        sp['work_phone'] = self.work_phone.get()
        sp['email'] = self.email.get()

        return sp

    def validate_fields(self, data):
        """
        Validate the data entered in the fields of the form

        Parameters (apart from self):
            data: dictionary object containing all the information entered on the form

        Return:
            valid_data: a boolean indicating whether the data is valid (True) or
                not valid (False)
            message: a string containing details about the fields that are not valid
            
            Returned as a tuple (valid_data, message)
        """

        valid_data = True
        message_list = []

        # Check field is not empty
        if len(data['title']) == 0:
            valid_data = False
            message_list.append("title is empty")
        if len(data['firstname']) == 0:
            valid_data = False
            message_list.append("firstname is empty")
        if len(data['surname']) == 0:
            valid_data = False
            message_list.append("surname is empty")
        if len(data['position']) == 0:
            valid_data = False
            message_list.append("position year is empty")
        if len(data['work_phone']) == 0:
            valid_data = False
            message_list.append("work phone is empty")
        if len(data['email']) == 0:
            valid_data = False
            message_list.append("e-mail is empty")

        # Position and title are selected from dropdown menu
        # so no further validation needed

        # Check that firstname and surname are alphabetic
        if not self.validator.is_alphabetic(data['firstname']):
            valid_data = False
            message_list.append("invalid firstname")

        if not self.validator.is_alphabetic(data['surname']):
            valid_data = False
            message_list.append("invalid surname")

        # Other checks
        if not self.validator.is_email(data['email']):
            valid_data = False
            message_list.append("invalid email format")

        if not self.validator.is_phone_number(data['work_phone']):
            valid_data = False
            message_list.append("invalid phone number format")

        message = ', '.join(message_list)

        return valid_data, message
        pass

    def create(self, data):
        """
        Create a new record in the database.

        A messagebox is used to display the outcome (success or failure)
        of the create operation to the user.

        Parameters (apart from self):
            data: dictionary object containing salesperson data to be saved
 
        Return: None
        """
        print("Creating a salesperson ...")
        print(data)

        session = db.get_db_session()
        result = self.salesperson_dao.create(session, data)
        session.close()

        messagebox.showinfo(self.mb_title_bar, result)
        pass

    def update(self, data):
        """
        Update a record in the database.

        A messagebox is used to display the outcome (success or failure)
        of the update operation to the user.

        Parameters (apart from self):
            data: dictionary object containing salesperson data to be saved
 
        Return: None
        """
        print("Updating a salesperson ...")
        print(data)

        session = db.get_db_session()
        result = self.salesperson_dao.update(session, data['salesperson_id'],
                                             data)
        session.close()

        messagebox.showinfo(self.mb_title_bar, result)
        pass

    def delete(self):
        """
        Delete a record from the database.

        The salesperson_id of the record to be deleted is obtained from a 
        global attribute.

        A messagebox is used to display the outcome (success or failure)
        of the delete operation to the user.

        Parameters (apart from self): None
 
        Return: None

        """
        print("Deleting a salesperson ...")

        sp_id = self.salesperson_id.get()
        print(sp_id)

        session = db.get_db_session()
        result = self.salesperson_dao.delete(session, sp_id)
        session.close()

        messagebox.showinfo(self.mb_title_bar, result)
        pass

    def load(self):
        """
        Retrieve a list of IDs from the database and load them into a listbox.
 
        Parameters (apart from self):
  
        Return: None
        """

        session = db.get_db_session()
        result = self.salesperson_dao.find_ids(session)
        session.close()
        print("result: ", result)

        if "salesperson_ids" in result:
            list_ids = result["salesperson_ids"]
            self.lb_ids.delete(0, tk.END)

            print("Setting salesperson_id in listbox")

            for x in list_ids:
                self.lb_ids.insert(tk.END, x)
            pass

    def on_list_select(self, evt):
        """
        Actions to be triggered when a user clicks an item in the listbox.

        Defined above in create_gui(), where on_list_select is bound to the
        listbox selection.

        Parameters (apart from self):
            evt: object containing information about the mouse click

        Return: None
        """

        w = evt.widget
        index = int(w.curselection()[0])
        value = w.get(index)

        print(index)
        print(value)

        session = db.get_db_session()
        result = self.salesperson_dao.find_by_id(session, value)
        session.close()

        print("result: ", result)

        sp = result['salesperson']
        self.populate_fields(sp)

        pass

    def populate_fields(self, sp):
        """
        Populate the fields of the form with data.

        Parameters (apart from self):
            sp: dictionary object containing all the information
                about a salesperson

        Return: None
        """

        self.salesperson_id.set(sp['salesperson_id'])
        self.title.set(sp['title'])
        self.firstname.set(sp['firstname'])
        self.surname.set(sp['surname'])
        self.position.set(sp['position'])
        self.work_phone.set(sp['work_phone'])
        self.email.set(sp['email'])
        pass
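
The Validation helpers used in validate_fields() (is_alphabetic, is_email, is_phone_number) are not included in this example; a rough, regex-based sketch of what they might do (the exact rules are assumptions):

import re

class Validation():
    def is_alphabetic(self, value):
        # Letters only, allowing spaces and hyphens so names like "Anne-Marie" pass.
        return bool(re.fullmatch(r"[A-Za-z][A-Za-z\- ]*", value))

    def is_email(self, value):
        # Very loose check: something@something.something
        return bool(re.fullmatch(r"[^@\s]+@[^@\s]+\.[^@\s]+", value))

    def is_phone_number(self, value):
        # Digits, optionally with spaces, dashes or a leading "+".
        return bool(re.fullmatch(r"\+?[\d\- ]{6,15}", value))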
Ejemplo n.º 39
0
 def get_result(self, predict_classifier, predict_probability):
     v = Validation()
     v.calculateF1(self.y_test, predict_classifier)
     v.allValidation(self.y_test, predict_probability)
Ejemplo n.º 40
0
class StaffGUI():
    """
    GUI class to perform CRUD operations on the employee table in the database.
    """
    def __init__(self):
        """
        The initialiser is used to "instantiate" attributes of the class.
        The attributes are the "variables" that have been declared "outside" 
        of the methods of the class.
        Some attributes may not have been declared here as they may be created
        anywhere in the class methods (Python allows this).

        Attributes are like global variables for the class, as they are 
        available to any method in the class.
        And, to use them, they must be prefixed with "self."
        
        This differentiates them from "local" variables which are 
        defined/created and used within a single method

        If you need to pass the value of a local variable from one method 
        to another, then you must pass "parameters" to the method 

        We cannot create the GUI here as we need to return a reference to 
        the frame created.
        Hence, we need to implement a 'normal' function to do this e.g. create_gui()

        Parameters (apart from self): None

        Return: None

        """

        # Instantiate a data access object
        # Contains methods to access the database
        self.emp_dao = StaffDAO()

        # Instantiate a validation object
        # Contains methods to validate input fields
        self.validator = Validation()

        # Form fields
        # Instantiate stringvars - hold  data entered in  fields of form
        self.staff_id = tk.StringVar()
        self.first_name = tk.StringVar()
        self.last_name = tk.StringVar()
        self.title = tk.StringVar()
        self.email = tk.StringVar()
        self.sex = tk.StringVar()
        self.contact_no = tk.IntVar()

        # List of employee ids - lb for listbox
        self.lb_ids = None

        # Messagebox title
        self.mb_title_bar = "Staff CRUD"

        pass

    """
    Create a high level frame which contains the entire GUI 
    """

    def create_gui(self, root):

        print("Creating Staff GUI ...")

        emp_frame = tk.Frame(root)
        emp_frame.pack()

        form_frame = tk.Frame(emp_frame)
        form_frame.pack()

        tk.Label(form_frame, font=('arial', 10),
                 text="Staff").grid(row=0, column=0, columnspan=3)

        # row 1: employee_id label, employee_id entry and list_of_ids label
        tk.Label(form_frame,
                 text="Staff Id",
                 font=('arial', 10),
                 width=20,
                 anchor="e",
                 bd=1,
                 pady=10,
                 padx=10).grid(row=1, column=0)
        tk.Entry(form_frame,
                 textvariable=self.staff_id,
                 width=30,
                 bd=1,
                 state=tk.DISABLED).grid(row=1, column=1)
        tk.Label(form_frame, text="Staff IDs",
                 font=('arial', 10)).grid(row=1, column=2)

        # row 2: title label and combobox (the listbox will go through)
        tk.Label(form_frame,
                 text="Title",
                 font=('arial', 10),
                 width=20,
                 anchor="e",
                 bd=1,
                 pady=10,
                 padx=10).grid(row=2, column=0)
        VALUES = ('Mr', 'Mrs', 'Ms', 'Miss', 'Dr')
        ttk.Combobox(form_frame,
                     state="readonly",
                     textvariable=self.title,
                     values=VALUES,
                     width=10).grid(row=2, column=1, sticky="w")
        self.title.set(VALUES[0])

        # row 3: firstname label, firstname entry and listbox of ids
        tk.Label(form_frame,
                 text="First name",
                 font=('arial', 10),
                 width=20,
                 anchor="e",
                 bd=1,
                 pady=10,
                 padx=10).grid(row=3, column=0)
        tk.Entry(form_frame, textvariable=self.first_name, width=30,
                 bd=1).grid(row=3, column=1)

        self.lb_ids = tk.Listbox(form_frame)
        self.lb_ids.grid(row=3, column=2, rowspan=5)
        self.lb_ids.bind('<<ListboxSelect>>', self.on_list_select)

        # row 4: lastname label and entry (the listbox will go through)
        tk.Label(form_frame,
                 text="Last name",
                 font=('arial', 10),
                 width=20,
                 anchor="e",
                 bd=1,
                 pady=10,
                 padx=10).grid(row=4, column=0)
        tk.Entry(form_frame, textvariable=self.last_name, width=30,
                 bd=1).grid(row=4, column=1)

        # row 5: title label and combobox (the listbox will go through)
        tk.Label(form_frame,
                 text="Sex",
                 font=('arial', 10),
                 width=20,
                 anchor="e",
                 bd=1,
                 pady=10,
                 padx=10).grid(row=5, column=0)
        VALUES = ('Male', 'Female', 'Undefined')
        ttk.Combobox(form_frame,
                     state="readonly",
                     textvariable=self.sex,
                     values=VALUES,
                     width=10).grid(row=5, column=1, sticky="w")

        # row 6: email label and combobox (the listbox will go through)
        tk.Label(form_frame,
                 text="Email",
                 font=('arial', 10),
                 width=20,
                 anchor="e",
                 bd=1,
                 pady=10,
                 padx=10).grid(row=6, column=0)
        tk.Entry(form_frame, textvariable=self.email, width=30,
                 bd=1).grid(row=6, column=1)

        # row 7: work_phone label and combobox (the listbox will go through)
        tk.Label(form_frame,
                 text="Contact No",
                 font=('arial', 10),
                 width=20,
                 anchor="e",
                 bd=1,
                 pady=10,
                 padx=10).grid(row=7, column=0)
        tk.Entry(form_frame, textvariable=self.contact_no, width=30,
                 bd=1).grid(row=7, column=1)

        # Buttons

        button_frame = tk.Frame(emp_frame, pady=10)
        button_frame.pack()

        tk.Button(button_frame,
                  width=10,
                  text="Clear",
                  command=self.clear_fields).pack(side=tk.LEFT)
        tk.Button(button_frame, width=10, text="Save",
                  command=self.save).pack(side=tk.LEFT)
        tk.Button(button_frame, width=10, text="Delete",
                  command=self.delete).pack(side=tk.LEFT)
        tk.Button(button_frame, width=10, text="Load",
                  command=self.load).pack(side=tk.LEFT)

        # Return a reference to the high level frame created
        return emp_frame

    """
    Clears all the fields of the form
    """

    def clear_fields(self):

        self.staff_id.set("")
        self.first_name.set("")
        self.last_name.set("")
        #self.sex.set("") # Do not clear if using dropdown
        #self.title.set("") # Do not clear if using dropdown
        self.email.set("")
        self.contact_no.set("")
        pass

    """
    Save the data displayed on the form to the database
    """

    def save(self):

        print("Saving staff....")

        # Get the data
        data = self.get_fields()

        # Validate the data
        valid_data, message = self.validate_fields(data)
        if valid_data:
            if (len(data['staff_id']) == 0):
                # If nothing has been entered in staff_id
                # i.e. its length is zero characters
                print("Calling create() as staff_id is absent")
                self.create(data)
            else:
                print("Calling update() as staff_id is present")
                self.update(data)
                pass
        else:
            message_text = "Invalid fields.\n" + message
            messagebox.showwarning(self.mb_title_bar,
                                   message_text,
                                   icon="warning")
            pass

    """
    Get the data entered in the fields of the form
    """

    def get_fields(self):

        emp = {}
        # staff_id is ignored when creating a record
        emp['staff_id'] = self.staff_id.get()
        emp['first_name'] = self.first_name.get()
        emp['last_name'] = self.last_name.get()
        emp['title'] = self.title.get()
        emp['sex'] = self.sex.get()
        emp['email'] = self.email.get()
        emp['contact_no'] = self.contact_no.get()

        return emp

    """
        Validate the data entered in the fields of the form
    """

    def validate_fields(self, data):

        # By default set to true, anything wrong will turn it to false
        valid_data = True
        # Instantiate an empty list to contain the messages
        message_list = []
        # Check for blank fields
        # Do not check employee_id as this is generated by the database
        #if len(data['employee_id']==0:
        #    valid_data = False
        #    message_list.append("employee_id is empty")
        if len(str(data['first_name'])) == 0:
            valid_data = False
            message_list.append("first_name is empty")
        if len(str(data['last_name'])) == 0:
            valid_data = False
            message_list.append("last_name is empty")
        if len(data['title']) == 0:
            valid_data = False
            message_list.append("title is empty")
        if len(data['sex']) == 0:
            valid_data = False
            message_list.append("sex is empty")
        if len(str(data['email'])) == 0:
            valid_data = False
            message_list.append("work_phone is empty")
        if len(str(data['contact_no'])) == 0:
            valid_data = False
            message_list.append("contact_no is empty")

        if not self.validator.is_alphabetic(data['first_name']):
            valid_data = False
            message_list.append("invalid first_name")

        if not self.validator.is_alphabetic(data['last_name']):
            valid_data = False
            message_list.append("invalid last_name")

        # Check if title is in a list [Mr, Ms, Mrs, Dr, etc]
        if data['title'] not in ["Mr", "Ms", "Mrs", "Miss", "Dr"]:
            valid_data = False
            message_list.append("title should be Mr, Ms, Mrs, Miss or Dr")

        # Check if email follows a certain pattern
        # i.e contains an @ followed by a dot
        if not self.validator.is_email(data['email']):
            valid_data = False
            message_list.append("invalid email format")

        # Join the items in the list as a string separated with a comma and a space
        message = ', '.join(message_list)

        return valid_data, message  # return 2 values

    """
     Create a new record in the database.
    """

    def create(self, data):

        print("Creating an staff ...")
        print(data)

        session = db.get_db_session()  # Get a session (database.py)
        result = self.emp_dao.create(session, data)
        # result is a tuple e.g. ("Employee added successfully", 1004)
        #result, employee_id = self.emp.create(data)
        # if you wish to get the message and employee_id separately
        session.close()  # Close the session

        messagebox.showinfo(self.mb_title_bar, result)

        pass

    """
    Create a new record in the database. 
    """

    def update(self, data):

        print("Updating staff ...")
        print(data)

        session = db.get_db_session()  # Get a session (database.py)
        result = self.emp_dao.update(session, data['staff_id'], data)
        session.close()  # close the session

        # Display the returned message to the user - use a messagebox
        # Display everything that is returned in the result
        messagebox.showinfo(self.mb_title_bar, result)
        pass

    """
    Delete a record in the database
    """

    def delete(self):

        # Grab the employee_id from the stringvar
        id = self.staff_id.get()
        print(id)

        # Call the data access object to do the job
        # Pass the id as parameter to the delete() method
        session = db.get_db_session()  # Get a session (database.py)
        result = self.emp_dao.delete(session, id)
        session.close()  # Close the session

        # Display the returned message to the user - use a messagebox
        # Display everything that is returned in the result
        messagebox.showinfo(self.mb_title_bar, result)
        pass

    """
    load records in the database
    """

    def load(self):

        session = db.get_db_session()  # Get a session (database.py)
        result = self.emp_dao.find_ids(session)  # {"staff_ids": [1, 2, 3]}
        session.close()  # Close the session
        print("result", result)
        # Check if there is an entry in the result dictionary
        if "staff_ids" in result:
            list_ids = result['staff_ids']
            self.lb_ids.delete(0, tk.END)
            print("Setting staff_id in listbox ...")
            for x in list_ids:
                self.lb_ids.insert(tk.END, x)
                #print(x)
            pass

    """
    on_list_select() is triggered when a user clicks an item in the listbox.
    """

    def on_list_select(self, evt):

        w = evt.widget
        index = int(w.curselection()[0])
        # index = position of the item clicked in the list, first item is item 0 not 1
        value = w.get(index)
        # value of the item clicked, in our case it's the employee_id
        print(index)
        print(value)

        # Call find_by_id and populate the stringvars of the form
        session = db.get_db_session()  # Get a session (database.py)
        result = self.emp_dao.find_by_id(session, value)
        session.close()  # close the session
        print("result", result)
        # { "employee" : {"employee_id": "", "firstname": "", etc}}
        emp = result['staff']
        self.populate_fields(emp)
        pass

    """
    Populate the fields of the form with data
    """

    def populate_fields(self, emp):

        # Set the values from the dict to the stringvars
        self.staff_id.set(emp['staff_id'])
        self.first_name.set(emp['first_name'])
        self.last_name.set(emp['last_name'])
        self.title.set(emp['title'])
        self.sex.set(emp['sex'])
        self.email.set(emp['email'])
        self.contact_no.set(emp['contact_no'])
        pass
Ejemplo n.º 41
0
from discord import Game
from discord.ext import commands

import validation
from gw2ApiKey import GW2Api
from mongoDB import MongoDB
from utils import Utils, UtilsCommand, UtilsDiscordRoles, ErrorMessages
from validation import Validation

utils = Utils()
description = utils.BOT_DESCRIPTION
askForAPIKey = utils.API_QUESTION
command_description = UtilsCommand()
server_roles = UtilsDiscordRoles()
error_messages = ErrorMessages()
validation = Validation()
mongoDb = MongoDB()


class DiscordBot(commands.Bot):
    # old, but since the bot only runs on one server, it's fine
    # current_server_joined = None

    def __init__(self):
        super().__init__(command_prefix="!",
                         description=description,
                         pm_help=True,
                         has_permission=8,
                         status=discord.Status.idle)
        self.add_command(self.be_rude)
        self.add_command(self.reg)
Ejemplo n.º 42
0
def options(arr):
    for i in arr:
        print(i)

    choice = v.intValidateInRange('Your choice - ', 0, len(arr))
    return choice
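intValidateInRange comes from the surrounding module's validation helper (aliased as v) and is not shown here. An assumed implementation that keeps prompting until an integer in the range [low, high) is entered might be (whether the upper bound is inclusive is an assumption):

def intValidateInRange(prompt, low, high):
    while True:
        raw = input(prompt)
        try:
            value = int(raw)
        except ValueError:
            print('Please enter a whole number.')
            continue
        if low <= value < high:
            return value
        print('Please enter a number between {} and {}.'.format(low, high - 1))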
Ejemplo n.º 43
0
def output_variables(unsafe_building_type):
    validate = Validation()
    validate.unsafe_data['building_type'] = unsafe_building_type
    building_type = validate._clean_building_type()
    return _output_variables(building_type)
 def fit(self, training_data):
     self.validation = Validation(training_data)
     self._training_data = training_data
     self.smoother = self.optimize_smoother()
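
optimize_smoother() itself is not part of this excerpt. By analogy with optimize_interpolator() above, one plausible shape for it is a small scipy search over a single smoothing parameter; everything below is an assumption, including that evaluate_training_error here accepts one float rather than the two-argument version shown earlier:

from scipy.optimize import minimize_scalar

def optimize_smoother(self):
    # Hypothetical: find the smoothing parameter in [0, 1] that minimises
    # a scalar training-error function.
    result = minimize_scalar(self.evaluate_training_error,
            bounds=(0.0, 1.0),
            method='bounded')
    return result.x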