functions.py
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 10:56:40 2020
@author: lenovo
"""
import json
import portalocker
import threading
from os import path
from datetime import datetime, timedelta
from dateutil.parser import parse
from configs.configurations import DEFAULT_DB_NAME

class DataStoreCRD:
    def check_time_to_live(self, value):
        # Check whether the stored value is still within its time-to-live.
        created_time = value['CreatedAt']
        # Parse the datetime from the stored ISO string.
        created_time = parse(created_time)
        time_to_live = value['Time-To-Live']
        if time_to_live is not None:
            # Compute the expiry time of the data.
            expired_datetime = created_time + timedelta(seconds=time_to_live)
            # Seconds remaining until expiry; zero or negative means expired.
            remaining_seconds = (expired_datetime - datetime.now()).total_seconds()
            if remaining_seconds <= 0:
                return False
        return value
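
    # Illustrative example (an assumption based on the fields stored by
    # check_create_data, not part of the original module): for a value
    #   {"CreatedAt": "2020-12-02T10:56:40", "Time-To-Live": 300}
    # check_time_to_live returns the value dict while fewer than 300 seconds
    # have elapsed since CreatedAt, and False once that window has passed.
    # A value whose Time-To-Live is None never expires.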

    def check_create_data(self, json_data, db_path):
        if not isinstance(json_data, dict):
            return False, "Incorrect request data format. Only a JSON object with key-value pairs is accepted."
        # Check the request data size; reject anything larger than 1GB.
        data_obj = json.dumps(json_data)
        if len(data_obj) > 1000000000:
            return False, "Request data would exceed the 1GB DataStore size limit."
        for key, value in json_data.items():
            # Keys are limited to 32 characters.
            if len(key) > 32:
                return False, "Keys must be at most 32 characters long."
            # Each value must itself be a JSON object.
            if not isinstance(value, dict):
                return False, "Values must be JSON objects."
            value_obj = json.dumps(value)
            # Each value JSON object must be 16KB or less in size.
            if len(value_obj) > 16384:
                return False, "Values must be at most 16KB in size."
        # Check whether the DataStore file exists.
        # If it does, append to the existing datastore;
        # otherwise create a new datastore containing the inserted data.
        datastore = path.join(db_path, DEFAULT_DB_NAME)
        data = {}
        if path.isfile(datastore):
            with open(datastore) as f:
                # Only a single process is allowed to access the file at a time.
                # Lock the file.
                portalocker.lock(f, portalocker.LOCK_EX)
                data = json.load(f)
                # Release the file lock.
                portalocker.unlock(f)
            # Check whether the file has already reached the 1GB size limit.
            prev_data_obj = json.dumps(data)
            if len(prev_data_obj) >= 1000000000:
                return False, "File size exceeded 1GB."
            # If any incoming key is already present in the existing
            # datastore, return an error message.
            have_key = any(x in json_data.keys() for x in data.keys())
            if have_key:
                return False, "Key already exists in DataStore."
        # --- Threading mechanism start ---
        def prepare_data_create(json_data_keys):
            # Add a CreatedAt timestamp to each item, and default
            # Time-To-Live to None when the item does not provide one.
            for key in json_data_keys:
                singleton_json = json_data[key]
                singleton_json["CreatedAt"] = datetime.now().isoformat()
                singleton_json["Time-To-Live"] = singleton_json.get("Time-To-Live")
                data[key] = singleton_json
        # The number of worker threads is fixed at 4.
        thread_count = 4
        items = list(json_data.keys())
        split_size = len(items) // thread_count
        threads = []
        for i in range(thread_count):
            start = i * split_size
            # The last thread picks up any remainder of the key list.
            end = None if i + 1 == thread_count else (i + 1) * split_size
            threads.append(threading.Thread(target=prepare_data_create,
                                            args=(items[start:end],), name=f"t{i+1}"))
            threads[-1].start()
        # Wait for all threads to finish.
        for t in threads:
            t.join()
        # --- Threading mechanism end ---
        # Write the merged data back to the datastore.
        with open(datastore, 'w+') as f:
            # Only a single process is allowed to access the file at a time.
            portalocker.lock(f, portalocker.LOCK_EX)
            json.dump(data, f)
            # Release the file lock.
            portalocker.unlock(f)
        return True, "Data created in DataStore."
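
    # Sketch of the resulting on-disk layout (an assumption inferred from the
    # code above, not normative): the datastore file holds one JSON object,
    # e.g. after creating key "user-1" it could contain
    #   {"user-1": {"name": "Alice", "Time-To-Live": 60,
    #               "CreatedAt": "2020-12-02T10:56:40.123456"}}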

    def read_delete_preprocess(self, key, db_path):
        datastore = path.join(db_path, DEFAULT_DB_NAME)
        # Check that the datastore exists.
        if not path.isfile(datastore):
            return False, "Empty DataStore. Data not found for the key."
        # Read the existing datastore data.
        with open(datastore) as f:
            # Only a single process is allowed to access the file at a time.
            portalocker.lock(f, portalocker.LOCK_EX)
            data = json.load(f)
            # Release the file lock.
            portalocker.unlock(f)
        # Check that the requested key is present in the data.
        if key not in data:
            return False, "No data found for the key provided."
        # Check whether the data for the key is still active (not expired).
        target = data[key]
        target_active = self.check_time_to_live(target)
        if not target_active:
            return False, "Requested data for the key has expired."
        return True, data

    def check_read_data(self, key, db_path):
        # Read the data for the given key from the datastore.
        status, message = self.read_delete_preprocess(key, db_path)
        if not status:
            return status, message
        data = message[key]
        # Strip the internal CreatedAt field before returning the value.
        del data['CreatedAt']
        return status, data

    def check_delete_data(self, key, db_path):
        status, message = self.read_delete_preprocess(key, db_path)
        if not status:
            return status, message
        datastore = path.join(db_path, DEFAULT_DB_NAME)
        # Delete the data for the key from the datastore.
        # This action is not reversible.
        del message[key]
        # Write the remaining data back after the deletion.
        with open(datastore, 'w+') as f:
            # Only a single process is allowed to access the file at a time.
            portalocker.lock(f, portalocker.LOCK_EX)
            json.dump(message, f)
            # Release the file lock.
            portalocker.unlock(f)
        return True, "Data is deleted from the DataStore."
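

# A minimal usage sketch, assuming DEFAULT_DB_NAME names a JSON file and that
# db_path below is an existing, writable directory; illustrative only, not
# part of the original module.
if __name__ == "__main__":
    store = DataStoreCRD()
    db_path = "."  # hypothetical location for the datastore file
    created, msg = store.check_create_data(
        {"user-1": {"name": "Alice", "Time-To-Live": 60}}, db_path)
    print(created, msg)
    read_ok, result = store.check_read_data("user-1", db_path)
    print(read_ok, result)
    deleted, msg = store.check_delete_data("user-1", db_path)
    print(deleted, msg)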