#!/usr/bin/env python
from os import listdir
from os.path import isfile, join
import csv
import json
import persistqueue
import requests
import shutil
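# Delhi COVID positive CSV loader: scan the uploads directory for CSV files,
# normalise column names, and queue each row as a JSON job for downstream
# processing.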
scanpath = '/home/ganesh/uploads'
processedPath = '/home/ganesh/processed'
rows = []
csvFileNames = [f for f in listdir(scanpath) if isfile(join(scanpath, f))]
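# Durable SQLite-backed queue: duplicate payloads are skipped and items remain
# until explicitly acknowledged by the consumer.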
jobQueue = persistqueue.UniqueAckQ('csv-processor')
for csvFileName in csvFileNames:
    fqFileName = join(scanpath, csvFileName)
    fqDestFilename = join(processedPath, csvFileName)
    try:
        with open(fqFileName) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                try:
                    convrow = dict((k.lower().replace(' ', '_'), v) for k, v in row.items())
                    convrow['wfSource'] = 'delhi_covid_positive_csv'
                    body = { 'id' : 'delhi_csv', 'body' : convrow, 'apiPath' : csvFileName }
                    jobQueue.put(json.dumps(body))
                    print('Queued job: ' + convrow.get('icmr_id', '<missing icmr_id>'))
                except Exception as e:
                    print('Error processing row: ' + str(row) + ' - ' + str(e))
    except Exception as e:
        print('Error processing file: ' + fqFileName + ' - ' + str(e))
    print('Items queued for file [' + csvFileName + ']: ' + str(jobQueue.size))
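
# AES monitoring loader configuration: CSVs land in upload_aes_monitoring and
# their rows are queued on the 'aes-monitoring' queue.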
from os import listdir
from os.path import isfile, join
import csv
import json
import persistqueue
import requests
import shutil
import time
import calendar
import math
import datetime
import re
scanpath = 'upload_aes_monitoring'
processedPath = 'processed_aes_monitoring'
rows = []
csvFileNames = [f for f in listdir(scanpath) if isfile(join(scanpath, f))]
jobQueue = persistqueue.UniqueAckQ('aes-monitoring')
statesmanUrl = "http://localhost:8080"
foxtrotUrl = "http://localhost:8082"
date_fields = ['end_date']
phones = set()
stateWorkflows = {"bihar": "c0c11200-0630-439d-a458-9ac21fdfa2a8"}
CURRENT_DATE = datetime.date.today()
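# Epoch milliseconds at local midnight today; note that strftime('%s') is
# glibc/Linux-specific.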
DAY_START_TIME = (int(
    datetime.datetime(CURRENT_DATE.year, CURRENT_DATE.month, CURRENT_DATE.day,
                      0, 0, 0).strftime('%s'))) * 1000
mandatoryFields = ['name', 'mobile_number', 'state', 'end_date']


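# Current UTC time in epoch milliseconds.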
def now():
    return calendar.timegm(time.gmtime()) * 1000
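

# End-all-workflows loader: reads CSVs from upload_end_all_workflows and
# queues jobs on the 'end-all-workflows' queue.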
from os import listdir
from os.path import isfile, join
import csv
import json
import persistqueue
import requests
import shutil
import time
import calendar
import math

scanpath = 'upload_end_all_workflows'
processedPath = 'processed_end_all_workflows'
rows = []
csvFileNames = [f for f in listdir(scanpath) if isfile(join(scanpath, f))]
jobQueue = persistqueue.UniqueAckQ('end-all-workflows')
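# FQL queries used to find the workflow ids associated with a phone number,
# covering the different field names used across workflows.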
fqls = [
    """select distinct(eventData.workflowId) from statesman where eventData.data.mobile_number = '%s'""",
    """select distinct(eventData.workflowId) from statesman where eventData.data.contact_number = '%s'""",
    """select distinct(eventData.workflowId) from statesman where eventData.data.phone = '%s'"""
]


def now():
    return calendar.timegm(time.gmtime()) * 1000


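# Convert a dd/mm/yyyy date string to epoch milliseconds (local time).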
def epoch_time(str_time):
    return (int(time.mktime(time.strptime(str_time, "%d/%m/%Y")))) * 1000

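
# COVID monitoring loader configuration: CSVs land in upload_covid_monitoring
# and their rows are queued on the 'covid-monitoring' queue.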
from os import listdir
from os.path import isfile, join
import csv
import json
import persistqueue
import requests
import shutil
import time
import calendar
import math

scanpath = 'upload_covid_monitoring'
processedPath = 'processed_covid_monitoring'
rows = []
csvFileNames = [f for f in listdir(scanpath) if isfile(join(scanpath, f))]
jobQueue = persistqueue.UniqueAckQ('covid-monitoring')
statesmanUrl = "http://localhost:8080"
phones = set()
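# Workflow template ids per state; the punjab value packs two ids, apparently
# pre-quoted so it can be interpolated directly into a query's IN (...) list.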
stateWorkflows = {
    "delhi": "3efd0e4b-a6cc-4e59-9f88-bb0141a66142",
    "punjab": "933bed6c-e6a6-4de4-9ea8-7a31d64a08dc','11dd4791-472b-454b-8f7a-39a589a6335c"
}


def now():
    return calendar.timegm(time.gmtime()) * 1000


def epoch_time(str_time):
    return (int(time.mktime(time.strptime(str_time, "%d/%m/%Y")))) * 1000

#!/usr/bin/env python
from os import listdir
from os.path import isfile, join
import csv
import json
import persistqueue
import requests
import shutil
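# KA COVID CSV processor: scan the uploads directory for CSV files and queue
# their rows on the 'ka-covid-csv-processor' queue.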
scanpath = 'uploads'
processedPath = 'processed'
rows = []
csvFileNames = [f for f in listdir(scanpath) if isfile(join(scanpath, f))]
jobQueue = persistqueue.UniqueAckQ('ka-covid-csv-processor')
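# Columns expected in each incoming CSV.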
fieldsRequired = [
    'repeat_sample_negative_on', 'symptom_status', 'geotag_ward',
    'date_of_hospitalization', 'created_on', 'district_of_residence',
    'test_method', 'owner', 'date_of_sample_collection', 'secondary_contacts',
    'hospitalization_type', 'contact_number', 'modified_by', 'flight_details',
    'geotag_address', 'created_by', 'record_status', 'patient_name',
    'symptoms', 'email', 'modified_on', 'district_p_code', 'date_of_travel',
    'date_of_discharge', 'icmr_address', 'description', 'state_p_code',
    'confirmation_date', 'patient_id', 'hospitalization_at',
    'state_of_residence', 'date_of_death', 'icmr_ward', 'symptoms_condition',
    'geotag_zone', 'remarks', 'status_reason', 'result_declared_on',
    'admitted_district', 'primary_contacts', 'gender', 'age', 'current_status',
    'record_created_on', 'icmr_id', 'icmr_zone', 'status', 'location_address',
    'repeat_sample_sent_on', 'laboratory_code', 'laboratory_name',
    'date_of_onset_of_symptoms'
]

for csvFileName in csvFileNames: