Example #1
def bitStreamify(header, Y, Cb, Cr):
  # Pack the fixed-width header fields, then the Huffman-coded channels,
  # into a single bitstream.
  toSend = bitarray.bitarray()
  for bitLength, value in zip(HEADER_FORMAT, header):
    toSend += bitarray.bitarray(np.binary_repr(value, width=bitLength))
  toSend += Encoding.encode(Y, Encoding.huffmanLookupLuminanceDC, Encoding.huffmanLookupLuminanceAC)
  toSend += Encoding.encode(Cb, Encoding.huffmanLookupChrominanceDC, Encoding.huffmanLookupChrominanceAC)
  toSend += Encoding.encode(Cr, Encoding.huffmanLookupChrominanceDC, Encoding.huffmanLookupChrominanceAC)
  return toSend
Example #2
def cnBeta(raw):
	# Decode the raw page, then re-encode it as UTF-8
	newRaw = Encoding.try_decode(raw)
	if not newRaw:
		print('Try decoding failed')
		return 'N/A'

	newRaw = Encoding.encode_ignore(newRaw, 'utf-8')

	# Parse the decoded text rather than the original raw string
	soup = BeautifulSoup(newRaw)
	content = soup.find('div', {'class': 'content'})

	if not content:
		return 'N/A'

	return str(content).replace('\n', '').replace('\r', '')
Example #3
    def get_new_board_state(self, board, move, player):
        # Returns new game state depending on the current state 'board'

        new_board = copy.deepcopy(board)
        if move == (-1, -1):
            return new_board
        if new_board.count == 1:
            return self.get_second_board_state(new_board, move)
        elif new_board.count == 2:
            return self.get_third_board_state(new_board, move)
        else:
            # Past the third state, the mask is updated following the same pattern.

            r, c = move
            if self.game_won(new_board.state, move, player):
                new_board.game_won = 1
            if player == 0:
                new_board.state[r, c] = 'X'
            else:
                new_board.state[r, c] = 'O'

            new_board.valid_mask[r, c] = 0
            new_board.count += 1
            new_board.encoding = Encode.encode(new_board.state, new_board.count)

        return new_board
Example #4
def deStreamify(bitstream):
  # Inverse of bitStreamify: unpack the fixed-width header fields, then
  # Huffman-decode the three channels.
  header = []
  index = 0
  for bitLength in HEADER_FORMAT:
    header.append(int(bitstream[index:index + bitLength].to01(), 2))
    index += bitLength

  bitstream = bitstream[index:]
  bits = iter(bitstream)
  Y = Encoding.decode(bits, Encoding.huffmanRootLuminanceDC, Encoding.huffmanRootLuminanceAC)
  Cb = Encoding.decode(bits, Encoding.huffmanRootChrominanceDC, Encoding.huffmanRootChrominanceAC)
  Cr = Encoding.decode(bits, Encoding.huffmanRootChrominanceDC, Encoding.huffmanRootChrominanceAC)
  return header, Y, Cb, Cr
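
Examples #1 and #4 form an encode/decode pair. A minimal round-trip check (hypothetical, assuming the same HEADER_FORMAT and Encoding module are in scope and header, Y, Cb, Cr are already prepared):

# Hypothetical round-trip check for bitStreamify/deStreamify
stream = bitStreamify(header, Y, Cb, Cr)
header2, Y2, Cb2, Cr2 = deStreamify(stream)
assert header2 == list(header)  # header fields survive the fixed-width packing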
Example #5
def Sampling(Type, Input_Dictionary, Output_Dictionary, Model_local, Input,
             Sentence_Length):
    # Run a single query through a trained net and return the scores as a dict
    json_list = {}
    if Type == "Class":
        with open(Output_Dictionary, 'rb') as f:
            Classes = pickle.load(f)

        #Vector input
        vec_query = Encoding.gen_query_vec(Input, Input_Dictionary,
                                           Sentence_Length)
        input_array = np.array(vec_query, dtype=float)
        sample_array = np.reshape(input_array, (-1, len(input_array)))
        accuracy, train_op, x, y_, y = classifier_model(
            len(Classes), len(input_array))
        with tf.Session() as sess:
            # variables need to be initialized before we can use them
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, Model_local + "model.ckpt")

            classification = y.eval({x: sample_array})
            for i in range(len(classification[0])):
                json_list[str(Classes[i])] = str(round(classification[0][i],
                                                       2))
    elif Type == "Location":
        vec_query = Encoding.gen_query_vec(Input, Input_Dictionary,
                                           Sentence_Length)
        input_array = np.array(vec_query, dtype=float)
        sample_array = np.reshape(input_array, (-1, len(input_array)))
        train_step, accuracy, x, y_, keep_prob, prediction = three_dem_trainer_model(
            Sentence_Length, len(input_array))

        with tf.Session() as sess:
            # variables need to be initialized before we can use them
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, Model_local + "model.ckpt")
            classification = prediction.eval({x: sample_array, keep_prob: 1.0})

            for i in range(len(classification[0])):
                json_list[str(i)] = str(
                    round((50 + classification[0][i]) * 0.01, 2))

    return json_list
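
A hypothetical call (all paths are placeholders); note that Model_local must end with a path separator, since "model.ckpt" is appended to it directly:

# Hypothetical usage of Sampling; arguments are illustrative only
result = Sampling("Class", "./Dictionary/Input.pickle",
                  "./Dictionary/Classes.pickle", "./Models/Intent/",
                  "turn on the lights", 15)
print(result)  # e.g. {"lights_on": "0.93", ...}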
Example #6
def test_encoding():
    gaps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    encoder = Encoding.Encoding()

    en_gamma_gaps = encoder.run(gaps, "g")
    en_delta_gaps = encoder.run(gaps, "d")

    print("gap    gamma encode    delta encode")
    for i in gaps:
        print(str(i) + "  " + str(encoder.gamma_encoding(i)) + "  " +
              str(encoder.delta_encoding(i)))
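
For reference, Elias gamma coding writes a number as len-1 zero bits followed by its binary form. A self-contained sketch (the project's Encoding.gamma_encoding may format its output differently):

# Minimal Elias gamma encoder, independent of the Encoding class
def elias_gamma(n):
    b = bin(n)[2:]                   # binary form, e.g. 9 -> "1001"
    return "0" * (len(b) - 1) + b    # unary length prefix, then the bits

print(elias_gamma(9))  # "0001001": three zeros, then 1001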
Example #7
    def get_third_board_state(self, second_board_state, move):
        # Return third game state. Requires new mask

        r, c = move
        second_board_state.state[r, c] = 'X'
        third_board_state = copy.deepcopy(second_board_state)
        mask = np.ones((19,19), dtype=np.int32) # Unmask all positions except for occupied
        for i in range(0, 19):
            for j in range(0, 19):
                if third_board_state.state[i,j] != '.':
                    mask[i,j] = 0
        third_board_state.valid_mask = mask
        third_board_state.count += 1
        third_board_state.encoding = Encode.encode(third_board_state.state, third_board_state.count)
        return third_board_state
Example #8
    def get_second_board_state(self, first_board_state, move):
        # Return second game state. Requires special mask

        r, c = move
        second_board_state = copy.deepcopy(first_board_state)
        mask = np.ones((19,19), dtype=np.int32) # Mask all moves within 3 moves of the middle 'X'
        mid = int((self.board_size - 1) / 2)
        for i in range(mid - 3, mid + 4):
            for j in range(mid - 3, mid + 4):
                mask[i,j] = 0
        second_board_state.valid_mask = mask
        second_board_state.state[r, c] = 'O'
        second_board_state.count += 1
        second_board_state.encoding = Encode.encode(second_board_state.state, second_board_state.count)
        return second_board_state
Example #9
def test_block_compression():
    # term -> [df, postings ptr]
    dictionary = {
        "automation": [3, 5],
        "automatic": [5, 6],
        "autograph": [8, 90],
        "NASA": [10, 34],
        "housekeeper": [2, 495],
        "household": [48, 95],
        "houseboat": [3, 48]
    }

    encoder = Encoding.Encoding()
    compressed_dictionary = encoder.blocked_compression(dictionary, 8)
    print(compressed_dictionary)
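
The idea behind blocked dictionary compression: concatenate length-prefixed terms into one string and keep a term pointer only for every k-th term. A minimal sketch of that idea (the project's blocked_compression may return a different structure):

# Illustrative blocked compression: one pointer per block of k terms
def block_compress(terms, k):
    string, pointers = "", []
    for i, term in enumerate(sorted(terms)):
        if i % k == 0:
            pointers.append(len(string))  # offset of the block's first term
        string += str(len(term)) + term   # length-prefixed term
    return string, pointers

s, ptrs = block_compress(["autograph", "automatic", "automation"], k=2)
print(s)     # "9autograph9automatic10automation"
print(ptrs)  # [0, 20]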
Example #10
    def get_empty_board_state(self):
        # Return initial game state. First move is fixed. Requires special mask

        # 1 = black, 'X'; 2 = white, 'O'
        board = np.chararray((19, 19), unicode = True)
        mid = int((self.board_size - 1) / 2)
        board[:, :] = '.'
        board[mid,mid] = 'X'

        mask = np.zeros((19,19), dtype=np.int32) # Mask all moves not immediately beside the middle 'X'
        for i in range(mid - 1, mid + 2):
            for j in range(mid - 1, mid + 2):
                mask[i,j] = 1
        mask[mid,mid] = 0
        count = 1
        code = Encode.encode(board, count)
        board.game_won = 0
        return BoardState(board, code, mask, count)
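
A hypothetical driver for the board-state chain above (the class name and constructor are placeholders; these methods are assumed to live on one game class with board_size = 19):

# Hypothetical usage; Gomoku is a placeholder class name
game = Gomoku()
state = game.get_empty_board_state()                        # count == 1, center 'X' fixed
state = game.get_new_board_state(state, (9, 8), player=1)   # second move: 'O' beside center
state = game.get_new_board_state(state, (5, 5), player=0)   # third move: 'X' outside the 7x7 block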
Example #11
def test_front_coding_compression():
    # term -> [df, postings ptr]
    dictionary = {
        "automation": [3, 5],
        "automatic": [5, 6],
        "NASA": [10, 34],
        "housekeeper": [2, 495],
        "household": [48, 95],
        "autograph": [8, 90],
        "houseboat": [3, 48]
    }

    encoder = Encoding.Encoding()
    compressed_dictionary = encoder.front_coding_compression(
        sorted(dictionary), 3, 4)
    print(compressed_dictionary)
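
Front coding exploits shared prefixes between consecutive sorted terms: the first term of a block is spelled out with a '*' marking the end of the shared prefix, and each following term stores only its suffix length and suffix. A minimal sketch of the idea (the project's front_coding_compression takes extra parameters and may differ):

# Illustrative front coding of one sorted block of terms
import os.path

def front_code_block(terms):
    prefix = os.path.commonprefix(terms)
    out = str(len(terms[0])) + prefix + "*" + terms[0][len(prefix):]
    for term in terms[1:]:
        out += str(len(term) - len(prefix)) + "<>" + term[len(prefix):]
    return out

print(front_code_block(["automata", "automate", "automatic", "automation"]))
# 8automat*a1<>e2<>ic3<>ion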
Example #12
def test_group_term_by_front_code():
    encoder = Encoding.Encoding()
    dictionary = {
        "automation": [3, 5],
        "automatic": [5, 6],
        "NASA": [10, 34],
        "housekeeper": [2, 495],
        "household": [48, 95],
        "autograph": [8, 90],
        "houseboat": [3, 48]
    }
    # Iterating a dict yields its keys, so sort the terms directly
    sorted_term_list = sorted(dictionary)

    subgrouped_list = encoder.group_term_by_front_code(sorted_term_list, 4)

    compressed_index = encoder.compress_sub_group(subgrouped_list, 4, 3)
    print(compressed_index)
Example #13
def main():
    # Input two integers
    x = int(input("Enter x = "))
    y = int(input("Enter y = "))

    # Calculate the bit lengths of the numbers
    length1 = len(bin(x)) - 2
    length2 = len(bin(y)) - 2

    # Use the longer of the two bit lengths
    length = max(length1, length2)

    # Calculate the 0 and 1 encodings and then find the set intersection
    # Also check the time it takes to do this
    start = time.time()
    set_intersect = set.intersection(Encoding.make_one_encoding(x, length), Encoding.make_zero_encoding(y, length))
    print("Time taken to compare encodings: ", time.time() - start)

    # Check the inequalities; if a check fails, show the encodings
    # of the numbers for which it failed

    if len(set_intersect) == 0:
        if x <= y:
            print(x, "is less than or equal to", y)
        else:
            print(Encoding.make_one_encoding(x, length))
            print(Encoding.make_zero_encoding(y, length))
    else:
        if x > y:
            print(x, "is greater than", y)
        else:
            print(Encoding.make_one_encoding(x, length))
            print(Encoding.make_zero_encoding(y, length))
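
The comparison above relies on the 0-encoding/1-encoding trick (Lin-Tzeng style private comparison): x > y exactly when the 1-encoding of x and the 0-encoding of y share an element, so an empty intersection means x <= y. A minimal sketch of what make_one_encoding and make_zero_encoding plausibly compute (the project's Encoding module may differ):

# Assumed behavior of the 0/1-encodings used above
def make_one_encoding(x, length):
    # Prefixes of x's binary form that end in a 1 bit
    bits = format(x, "0{}b".format(length))
    return {bits[:i + 1] for i in range(length) if bits[i] == "1"}

def make_zero_encoding(y, length):
    # For every 0 bit of y, the preceding prefix with a 1 appended
    bits = format(y, "0{}b".format(length))
    return {bits[:i] + "1" for i in range(length) if bits[i] == "0"}

print(make_one_encoding(6, 3) & make_zero_encoding(5, 3))  # {'11'}: 6 > 5
print(make_one_encoding(4, 3) & make_zero_encoding(6, 3))  # set(): 4 <= 6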
Example #14
    def Build(File_Location, Column_Label, Column_Type, Input_Column):
        logging.basicConfig(filename="Firefly.log")

        Default_Sentence_Length = 15
        Dictionary_Location = './Util/store.csv'
        Testing_Amount = 0.25
        Diversity = 5
        desired_lines = 100000

        input_type = []
        location_fortype = []
        with open(Dictionary_Location, 'r', encoding="utf8") as dict_local:
            lines = csv.reader(dict_local)
            for line in lines:
                input_type.append(line[0])
                location_fortype.append(line[1])

        with open('./Util/netstructure.txt') as f:
            lines = [line.strip() for line in list(f)]
            data_name = []
            data_structure = []
            data_position = []
            for line in lines:
                parts = line.split(":")
                data_name.append(parts[0])
                data_structure.append(parts[1])
                data_position.append(parts[2])

        for inputs_name_types in range(len(input_type)):
            if input_type[inputs_name_types] in data_name:
                print(input_type[inputs_name_types] + ' is in NetStructure')
            else:
                with open('./Util/netstructure.txt', "a") as netstructure:
                    netstructure.write(input_type[inputs_name_types] + ':' +
                                       Column_Type[inputs_name_types] + ':' +
                                       'NotTrained\n')
        dicts_store = []
        for charc in range(len(Column_Label)):
            if Column_Label[charc] in input_type:
                dicts_store.append(location_fortype[input_type.index(
                    Column_Label[charc])])
            elif Column_Type[charc] == "Text" or Column_Type[charc] == "Class":
                #Build dictionary
                stored_dict = Encoding.dictionary(File_Location, charc,
                                                  Column_Label[charc],
                                                  Column_Type[charc])
                add_to_store = Column_Label[charc] + ',' + stored_dict + '\n'
                with open(Dictionary_Location, "a") as myfile:
                    myfile.write(add_to_store)
                dicts_store.append(stored_dict)
            else:
                dicts_store.append("None")

        trainingdata_Store = []
        vector_Store = []
        with open(File_Location, 'r', encoding="utf8") as trainingdata:
            lines = csv.reader(trainingdata)
            elements = len(Column_Label)  # number of columns
            for element in range(elements):
                trainingdata_Store.append([])
                vector_Store.append([])
            for line in lines:
                for p, parts in enumerate(line):
                    if p < elements:
                        trainingdata_Store[p].append(parts)
        #Vectorise Training File
        increment_line = 0
        while increment_line < desired_lines:
            print("Line: ", str(increment_line), "Outof: ", str(desired_lines))
            for elements in range(len(trainingdata_Store)):
                folder_for_store = Column_Label[elements]
                if Column_Type[elements] == "Text":
                    vector_gen_store = []
                    for training_line in trainingdata_Store[Input_Column]:
                        vector_gen_store.append(
                            Encoding.gen_query_vec(training_line,
                                                   dicts_store[elements],
                                                   Default_Sentence_Length))
                        #save to a pickle
                    vector_Store[elements].append(list(vector_gen_store))
                elif Column_Type[elements] == "Class":
                    vector_gen_store = []
                    for training_line in trainingdata_Store[elements]:
                        vector_gen_store.append(
                            Encoding.gen_intent_vec(training_line,
                                                    dicts_store[elements]))
                    #save to a pickle
                    vector_Store[elements].append(list(vector_gen_store))
                elif Column_Type[elements] == "Location":
                    vector_gen_store = []
                    line = 0
                    for training_line in trainingdata_Store[Input_Column]:
                        vector_gen_store.append(
                            Encoding.gen_L_vec(
                                training_line,
                                trainingdata_Store[elements][line],
                                Default_Sentence_Length))
                        line += 1
                    vector_Store[elements].append(list(vector_gen_store))
            increment_line += len(vector_Store[0][0])
        #Save elements
        for d in range(Diversity):
            random.seed(random.randint(0, 100000))
            testing_size = round(len(vector_Store[0][0]) * Testing_Amount)
            for elements in range(len(trainingdata_Store)):
                if elements != Input_Column:
                    # Shuffle outputs and inputs together so pairs stay aligned
                    paired = list(zip(vector_Store[elements][0],
                                      vector_Store[Input_Column][0]))
                    random.shuffle(paired)
                    data_to_split = [out for out, _ in paired]
                    input_data_to_split = [inp for _, inp in paired]

                    # Hold out the last testing_size samples for testing
                    split = len(data_to_split) - testing_size
                    training_data = list(data_to_split[:split])
                    testing_data = list(data_to_split[split:])

                    input_training_data = list(input_data_to_split[:split])
                    input_testing_data = list(input_data_to_split[split:])

                    folder_for_store = Column_Label[
                        Input_Column] + '$$$' + Column_Label[elements]
                    folder_output_store = Column_Label[elements]
                    folder_input_store = Column_Label[Input_Column]
                    if not os.path.exists('./TrainingData/DataforTraining/' +
                                          folder_for_store + '/' +
                                          folder_output_store + '/'):
                        os.makedirs('./TrainingData/DataforTraining/' +
                                    folder_for_store + '/' +
                                    folder_output_store + '/')
                    if not os.path.exists('./TrainingData/DataforTraining/' +
                                          folder_for_store + '/' +
                                          folder_input_store + '/'):
                        os.makedirs('./TrainingData/DataforTraining/' +
                                    folder_for_store + '/' +
                                    folder_input_store + '/')

                    number_of_files_to_name = os.listdir(
                        './TrainingData/DataforTraining/' + folder_for_store +
                        '/' + folder_output_store + '/')

                    with open(
                            './TrainingData/DataforTraining/' +
                            folder_for_store + '/' + folder_output_store +
                            '/' + str(len(number_of_files_to_name)) +
                            '.pickle', 'wb') as f:
                        pickle.dump([training_data, testing_data],
                                    f,
                                    protocol=2)

                    with open(
                            './TrainingData/DataforTraining/' +
                            folder_for_store + '/' + folder_input_store + '/' +
                            str(len(number_of_files_to_name)) + '.pickle',
                            'wb') as f:
                        pickle.dump([input_training_data, input_testing_data],
                                    f,
                                    protocol=2)

        return {"Complete": "Vectorising training data complete"}
Example #15
print("time spend for building stem dictionary: " +
      str(s_end_time - s_start_time))

# print("Building lemma dictionary")
# l_start_time = time.time()
# lemma_processor = ProcessDoc.ProcessDoc(doc_path, 1, 0)
# lemma_processor.run()
# lemma_dic = lemma_processor.collection_dic
# l_sorted_terms = sorted(lemma_dic.items(), key=lambda kv: kv[0])
# l_end_time = time.time()

# print("time spend for building lemma dictionary: " + str(l_end_time - l_start_time))
# f = 8

encoder = Encoding.Encoding()


def block_compression(dictionary):
    print(" proceeding block compression ")
    block_compressed = encoder.blocked_compression(dictionary, 8)
    return block_compressed


def front_compression(dictionary):
    print(" proceeding front compression ")
    front_compressed = encoder.front_coding_compression(dictionary, 4, 8)
    return front_compressed


def gamma_compression(dictionary):
    # Assumed body, following the pattern of the two functions above;
    # "g" selects gamma encoding in encoder.run (see Example #6)
    print(" proceeding gamma compression ")
    gamma_compressed = encoder.run(dictionary, "g")
    return gamma_compressed


def Dataset(Name):

    cap = cv2.VideoCapture(1)
    count = 0
    text = 'Press S to save an image'
    color = (0, 255, 0)
    status = False
    Border = 10
    Run = Encoding.Check_Name(Name)
    Encoder_status = 0
    Primary_id = Encoding.Get_PID()
    while Run:

        Ret, Frame = cap.read()
        if not Ret:
            break

        font = cv2.FONT_HERSHEY_SIMPLEX

        Input_Key = cv2.waitKey(1)

        if Input_Key == ord('s'):

            Save_Frame = Frame.copy()
            Encoder_status = Encoding.Encoder(Save_Frame, Primary_id,
                                              Frame.shape)
            if Encoder_status == 1:
                text = 'Image saved'
                count = count + 1
                color = (0, 255, 0)
            elif Encoder_status == 2:
                text = 'Please move away from the camera'
                color = (0, 0, 255)
            elif Encoder_status == 3:
                text = 'Please move closer to the camera'
                color = (0, 0, 255)
            elif Encoder_status == 4:
                text = 'Stay in the center of the camera'
                color = (0, 0, 255)
            elif Encoder_status == 5:
                text = 'Too many faces found'
                color = (0, 0, 255)

        elif Input_Key == 27:
            Run = False

        if count == 5:
            Run = False

        cv2.putText(Frame, text, (int(Frame.shape[1] * 0.5 - len(text) * 7),
                                  int(Frame.shape[0] * 0.95)), font, 0.8,
                    color, 2, cv2.LINE_AA)
        cv2.putText(Frame, str(count), (int(Frame.shape[1] * 0.96), 20), font,
                    0.8, (0, 255, 0), 2, cv2.LINE_AA)

        cv2.imshow('Dataset Generator', Frame)

    if Encoder_status == 1:
        Description = input('Enter Description for {} :'.format(Name))
        status = Encoding.Add_Database(Primary_id, Name, Description)

    cap.release()
    cv2.destroyAllWindows()

    return status


import os
import Dataset
import Encoding

pwd = os.path.dirname(os.path.realpath(__file__))

if not os.path.isfile(pwd + '/Face_database.db'):
    Encoding.Make_Table()

Status = input(
    'Enter 1 to add a new face.\nEnter 2 to scan a face.\nEnter 3 to quit.\n')
if Status == '1':
    Name = input('Enter Your Name:')
    Dataset.Dataset(Name)
elif Status == '2':
    data = Encoding.unlock()
    print(data)
elif Status == '3':
    print('Quitting')
else:
    print("Invalid Entry")