# queue_separate_table.py (forked from ourresearch/oadoi)
import os
import argparse
from time import time
from time import sleep
from sqlalchemy import sql
from sqlalchemy import exc
from subprocess import call
import heroku3
import boto.ec2
from boto.manage.cmdshell import sshclient_from_instance
from pprint import pprint
import datetime
from app import db
from app import logger
from jobs import update_registry
import jobs_defs # needs to be imported so the definitions get loaded into the registry
from util import elapsed
from util import run_sql
from util import get_sql_answer
from util import get_sql_answers
from util import clean_doi
from app import HEROKU_APP_NAME
from pub import Pub
# to get the clarivate dois in
# date; grep "WOS:" DOI_Output.txt | sed 's:\\:\\\\:g' | psql postgres://uc1l3d6vod6nsk:p5f54c0e9c8bb4067420ab6e6eb78a4a93234db67fbd3eede893a9a86781a484d@ec2-34-204-251-168.compute-1.amazonaws.com:5432/dds97qbhb1bu4i?ssl=true -c "copy dois_from_wos (wos_id) from STDIN;"; date;
def monitor_till_done(job_type):
    logger.info(u"collecting data. will have some stats soon...")
    logger.info(u"\n\n")

    num_total = number_total_on_queue(job_type)
    print "num_total", num_total
    num_unfinished = number_unfinished(job_type)
    print "num_unfinished", num_unfinished

    loop_thresholds = {"short": 30, "long": 10*60, "medium": 60}
    loop_unfinished = {"short": num_unfinished, "long": num_unfinished}
    loop_start_time = {"short": time(), "long": time()}

    # print_idle_dynos(job_type)

    while all(loop_unfinished.values()):
        for loop in ["short", "long"]:
            if elapsed(loop_start_time[loop]) > loop_thresholds[loop]:
                if loop in ["short", "long"]:
                    num_unfinished_now = number_unfinished(job_type)
                    num_finished_this_loop = loop_unfinished[loop] - num_unfinished_now
                    loop_unfinished[loop] = num_unfinished_now

                    # if loop=="long":
                    #     logger.info(u"\n****"),
                    logger.info(u" {} finished in the last {} seconds, {} of {} are now finished ({}%). ".format(
                        num_finished_this_loop, loop_thresholds[loop],
                        num_total - num_unfinished_now,
                        num_total,
                        int(100*float(num_total - num_unfinished_now)/num_total)
                    )),  # comma so the next part will stay on the same line
                    if num_finished_this_loop:
                        minutes_left = float(num_unfinished_now) / num_finished_this_loop * loop_thresholds[loop] / 60
                        logger.info(u"{} estimate: done in {} mins, which is {} hours".format(
                            loop, round(minutes_left, 1), round(minutes_left/60, 1)))
                    else:
                        print
                    loop_start_time[loop] = time()
                    # print_idle_dynos(job_type)
        print ".",
        sleep(3)

    logger.info(u"everything is done. turning off all the dynos")
    scale_dyno(0, job_type)

def number_total_on_queue(job_type):
    num = get_sql_answer(db, "select count(*) from {}".format(table_name(job_type)))
    return num

def number_waiting_on_queue(job_type):
    num = get_sql_answer(db, "select count(*) from {} where started is null".format(table_name(job_type)))
    return num

def number_unfinished(job_type):
    num = get_sql_answer(db, "select count(*) from {} where finished is null".format(table_name(job_type)))
    return num

def print_status(job_type):
    num_dois = number_total_on_queue(job_type)
    num_waiting = number_waiting_on_queue(job_type)
    if num_dois:
        logger.info(u"There are {} dois in the queue, of which {} ({}%) are waiting to run".format(
            num_dois, num_waiting, int(100*float(num_waiting)/num_dois)))

def kick(job_type):
    q = u"""update {table_name} set started=null, finished=null
        where finished is null""".format(
        table_name=table_name(job_type))
    run_sql(db, q)
    print_status(job_type)

def reset_enqueued(job_type):
    q = u"update {} set started=null, finished=null".format(table_name(job_type))
    run_sql(db, q)

def truncate(job_type):
    q = "truncate table {}".format(table_name(job_type))
    run_sql(db, q)

def table_name(job_type):
    table_name = "doi_queue"
    if job_type=="hybrid":
        table_name += "_with_hybrid"
    elif job_type=="dates":
        table_name += "_dates"
    return table_name

def process_name(job_type):
    # process_name = "run"  # formation name is from Procfile
    process_name = "update"  # formation name is from Procfile
    if job_type=="hybrid":
        process_name += "_with_hybrid"
    elif job_type=="dates":
        process_name += "_dates"
    return process_name

def num_dynos(job_type):
    heroku_conn = heroku3.from_key(os.getenv("HEROKU_API_KEY"))
    num_dynos = 0
    try:
        dynos = heroku_conn.apps()[HEROKU_APP_NAME].dynos()[process_name(job_type)]
        num_dynos = len(dynos)
    except (KeyError, TypeError) as e:
        pass
    return num_dynos

def print_idle_dynos(job_type):
    heroku_conn = heroku3.from_key(os.getenv("HEROKU_API_KEY"))
    app = heroku_conn.apps()[HEROKU_APP_NAME]
    running_dynos = []
    try:
        running_dynos = [dyno for dyno in app.dynos() if dyno.name.startswith(process_name(job_type))]
    except (KeyError, TypeError) as e:
        pass

    dynos_still_working = get_sql_answers(db, "select dyno from {} where started is not null and finished is null".format(table_name(job_type)))
    dynos_still_working_names = [n for n in dynos_still_working]

    logger.info(u"dynos still running: {}".format([d.name for d in running_dynos if d.name in dynos_still_working_names]))
    # logger.info(u"dynos stopped:", [d.name for d in running_dynos if d.name not in dynos_still_working_names])
    # kill_list = [d.kill() for d in running_dynos if d.name not in dynos_still_working_names]

def scale_dyno(n, job_type):
    logger.info(u"starting with {} dynos".format(num_dynos(job_type)))
    logger.info(u"setting to {} dynos".format(n))
    heroku_conn = heroku3.from_key(os.getenv("HEROKU_API_KEY"))
    app = heroku_conn.apps()[HEROKU_APP_NAME]
    app.process_formation()[process_name(job_type)].scale(n)

    logger.info(u"sleeping for 2 seconds while it kicks in")
    sleep(2)
    logger.info(u"verifying: now at {} dynos".format(num_dynos(job_type)))

def login_to_aws():
    logger.info(u"logging in to aws")
    conn = boto.ec2.connect_to_region('us-west-2')
    # instance = conn.get_all_instances()[0].instances[0]
    ssh_client = None
    for reservation in conn.get_all_instances():
        instance = reservation.instances[0]
        try:
            if not ssh_client:
                ssh_client = sshclient_from_instance(instance, "data/key.pem", user_name="ec2-user")
                print u"this instance worked: {}".format(instance)
        except Exception:
            pass
    return (conn, ssh_client)

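# The export functions below repeat the same three lines for every remote
# command (log it, run it over SSH, log the result). A small helper like the
# sketch below could factor that out; it is not used by the code as written,
# and the name run_and_log is just illustrative.
def run_and_log(ssh_client, command):
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))
    return (status, stdout, stderr)
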
# clarivate
# python queue_separate_table.py --export_with_versions --week
#
# or, run this on aws
# create table export_main_changed_with_versions_20180118 as (select * from export_main_changed_with_versions where last_changed_date > '2018-01-10'::timestamp)
# it takes about 45 minutes
# then this
# python queue_separate_table.py --export_with_versions --view=export_main_changed_with_versions_20180118
# which takes about 5 minutes
#
# mv all_dois*.csv datasets_for_clarivate
# mv all_dois*.csv.gz datasets_for_clarivate
def export_with_versions(do_all=False, job_type="normal", filename=None, view=None, week=False, json=False):
    # ssh -i /Users/hpiwowar/Dropbox/ti/certificates/aws-data-export.pem ec2-user@ec2-13-59-23-54.us-east-2.compute.amazonaws.com
    # aws s3 cp test.txt s3://mpr-ims-harvestor/mpr-ims-dev/harvestor_staging_bigBatch/OA/test.txt

    # connect to our bucket
    (conn, ssh_client) = login_to_aws()

    # to connect to clarivate's bucket
    # clarivate_conn = boto.ec2.connect_to_region('us-east-2')
    # clarivate_instance = clarivate_conn.get_all_instances()[0].instances[0]
    # clarivate_ssh_client = sshclient_from_instance(clarivate_instance, "/Users/hpiwowar/Dropbox/ti/certificates/aws-data-export.pem", user_name="ec2-user")

    logger.info(u"log in done")

    now_timestamp = datetime.datetime.utcnow().isoformat()[0:19].replace(":", "")
    if not filename:
        filename = "all_dois_{}.csv".format(now_timestamp)

    today = datetime.datetime.utcnow()
    if week:
        last_week = today - datetime.timedelta(days=9)
        view = "export_main_changed_with_versions where last_changed_date >= '{}'::timestamp and updated > '1043-01-01'::timestamp".format(last_week.isoformat()[0:19])
        filename = "changed_dois_with_versions_{}_to_{}.csv".format(last_week.isoformat()[0:19], today.isoformat()[0:19]).replace(":", "")
    else:
        filename = "dois_with_versions_{}.csv".format(today.isoformat()[0:19]).replace(":", "")

    if not view:
        view = "export_main_changed_with_versions"

    command = """psql {}?ssl=true -c "\copy (select * from {}) to '{}' WITH (FORMAT CSV, HEADER);" """.format(
        os.getenv("DATABASE_URL"), view, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    command = """gzip -c {} > {}.gz;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    command = """date -r {}.gz;""".format(
        filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))
    gz_modified = stdout.strip()

    # command = """aws s3 cp {}.gz s3://mpr-ims-harvestor/mpr-ims-dev/harvestor_staging_bigBatch/OA/{}.gz --acl public-read --metadata "modifiedtimestamp='{}'";""".format(
    #     filename, filename, gz_modified)
    command = """aws s3 cp {}.gz s3://oadoi-for-clarivate/{}.gz --acl public-read --metadata "modifiedtimestamp='{}'";""".format(
        filename, filename, gz_modified)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # also make a .DONE file
    # how to calculate a checksum http://www.heatware.net/linux-unix/how-to-create-md5-checksums-and-validate-a-file-in-linux/
    command = """md5sum {}.gz > {}.gz.DONE;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    command = """date -r {}.gz;""".format(
        filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))
    gz_done_modified = stdout.strip()

    # copy up the .DONE file
    # command = """aws s3 cp {}.gz.DONE s3://mpr-ims-harvestor/mpr-ims-dev/harvestor_staging_bigBatch/OA/{}.gz.DONE --acl public-read --metadata "modifiedtimestamp='{}'";""".format(
    #     filename, filename, gz_done_modified)
    command = """aws s3 cp {}.gz.DONE s3://oadoi-for-clarivate/{}.gz.DONE --acl public-read --metadata "modifiedtimestamp='{}'";""".format(
        filename, filename, gz_done_modified)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # logger.info(u"now go to *** https://console.aws.amazon.com/s3/object/mpr-ims-harvestor/mpr-ims-dev/harvestor_staging_bigBatch/OA/{}.gz?region=us-east-1&tab=overview ***".format(
    #     filename))
    logger.info(u"public link is at *** https://s3-us-west-2.amazonaws.com/oadoi-for-clarivate/{}.gz ***".format(
        filename))

    conn.close()

# for weekly update
# python queue_separate_table.py --export_no_versions --week --json
# 2 steps
# this step took 5.5 hours for a table of 93540542 rows
# on aws: create table export_main_no_versions_20180116 as (select * from export_main_no_versions)
# then from my prompt
# python queue_separate_table.py --export_no_versions --view export_main_no_versions_20180116
# aws s3 cp all_dois_20180122T165326.csv.gz s3://oadoi-datasets/oa_status_by_doi.csv.gz --acl public-read; date;
# or, for just the changed one
# python queue_separate_table.py --export_no_versions --view="export_main_changed_no_versions where last_changed_date >= '2018-01-21'::timestamp"
def export_no_versions(do_all=False, job_type="normal", filename=None, view="export_main_no_versions", week=False, json=False):
    (conn, ssh_client) = login_to_aws()
    logger.info(u"log in done")

    today = datetime.datetime.utcnow()

    if week:
        last_week = today - datetime.timedelta(days=9)
        if json:
            view = "pub where last_changed_date >= '{}'::timestamp and updated > '1043-01-01'::timestamp".format(last_week.isoformat()[0:19])
            filename = "changed_dois_{}_to_{}.jsonl".format(last_week.isoformat()[0:19], today.isoformat()[0:19]).replace(":", "")
        else:
            view = "export_main_changed_no_versions where last_changed_date >= '{}'::timestamp and updated > '1043-01-01'::timestamp".format(last_week.isoformat()[0:19])
            filename = "changed_dois_{}_to_{}.csv".format(last_week.isoformat()[0:19], today.isoformat()[0:19]).replace(":", "")
    else:
        if json:
            filename = "full_dois_{}.jsonl".format(today.isoformat()[0:19]).replace(":", "")
        else:
            filename = "full_dois_{}.csv".format(today.isoformat()[0:19]).replace(":", "")

    if json:
        command = """psql {}?ssl=true -c "\copy (select response_jsonb from {}) to '{}';" """.format(
            os.getenv("DATABASE_URL"), view, filename)
    else:
        command = """psql {}?ssl=true -c "\copy (select * from {}) to '{}' WITH (FORMAT CSV, HEADER);" """.format(
            os.getenv("DATABASE_URL"), view, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    if json:
        command = """sed -i 's/"publishedVersion"/null/g; s/"submittedVersion"/null/g; s/"acceptedVersion"/null/g' {}""".format(filename)
        logger.info(command)
        status, stdout, stderr = ssh_client.run(command)
        logger.info(u"{} {} {}".format(status, stdout, stderr))

    command = """gzip -c {} > {}.gz; date;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    command = """aws s3 cp {}.gz s3://unpaywall-data-updates/{}.gz --acl public-read; date; """.format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # also make a .DONE file
    # how to calculate a checksum http://www.heatware.net/linux-unix/how-to-create-md5-checksums-and-validate-a-file-in-linux/
    command = """md5sum {}.gz > {}.gz.DONE; date;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    # copy up the .DONE file
    command = """aws s3 cp {}.gz.DONE s3://unpaywall-data-updates/{}.gz.DONE --acl public-read; date;""".format(
        filename, filename)
    logger.info(command)
    status, stdout, stderr = ssh_client.run(command)
    logger.info(u"{} {} {}".format(status, stdout, stderr))

    logger.info(u"now go to *** https://console.aws.amazon.com/s3/object/unpaywall-data-updates/{}.gz?region=us-east-1&tab=overview ***".format(
        filename))
    logger.info(u"public link is at *** https://s3-us-west-2.amazonaws.com/unpaywall-data-updates/{}.gz ***".format(
        filename))

    conn.close()

def print_logs(job_type):
    command = "heroku logs -t --dyno={}".format(process_name(job_type))
    call(command, shell=True)

def add_dois_to_queue_from_file(filename, job_type):
    start = time()

    command = """psql `heroku config:get DATABASE_URL`?ssl=true -c "\copy {table_name} (id) FROM '{filename}' WITH CSV DELIMITER E'|';" """.format(
        table_name=table_name(job_type), filename=filename)
    call(command, shell=True)

    q = "update {} set id=lower(id)".format(table_name(job_type))
    run_sql(db, q)

    logger.info(u"add_dois_to_queue_from_file done in {} seconds".format(elapsed(start, 1)))
    print_status(job_type)

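# add_dois_to_queue_from_file expects a plain text file with one DOI per line;
# the pipe delimiter in the \copy above presumably keeps commas inside DOIs
# from being treated as column separators. A hypothetical input file and
# invocation, for illustration only:
#
#   10.1234/example.one
#   10.5678/example.two
#
#   python queue_separate_table.py --filename=data/my_dois.csv --dynos=10
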
def add_dois_to_queue_from_query(where, job_type):
    logger.info(u"adding all dois, this may take a while")
    start = time()

    table_name = "doi_queue"

    # run_sql(db, "drop table {} cascade".format(table_name(job_type)))
    # create_table_command = "CREATE TABLE {} as (select id, random() as rand, null::timestamp as finished, null::timestamp as started, null::text as dyno from crossref)".format(
    #     table_name(job_type))
    create_table_command = "CREATE TABLE {} as (select id, random() as rand, null::timestamp as finished, null::timestamp as started from pub);".format(
        table_name)
    if where:
        create_table_command = create_table_command.replace("from pub)", "from pub where {})".format(where))
    run_sql(db, create_table_command)
    create_table_command += """
        alter table {table_name} alter column rand set default random();
        CREATE INDEX {table_name}_id_idx ON {table_name} USING btree (id);
        CREATE INDEX {table_name}_finished_null_rand_idx on {table_name} (rand) where finished is null;
        CREATE INDEX {table_name}_started_null_rand_idx ON {table_name} USING btree (rand, started) WHERE started is null;
        -- from https://lob.com/blog/supercharge-your-postgresql-performance
        -- vacuums and analyzes every ten million rows
        ALTER TABLE {table_name} SET (autovacuum_vacuum_scale_factor = 0.0);
        ALTER TABLE {table_name} SET (autovacuum_vacuum_threshold = 10000000);
        ALTER TABLE {table_name} SET (autovacuum_analyze_scale_factor = 0.0);
        ALTER TABLE {table_name} SET (autovacuum_analyze_threshold = 10000000);
    """.format(
        table_name=table_name)
    for command in create_table_command.split(";"):
        run_sql(db, command)

    command = """create or replace view export_queue as
        SELECT id AS doi,
            updated AS updated,
            response_jsonb->>'evidence' AS evidence,
            response_jsonb->>'oa_status' AS oa_color,
            response_jsonb->>'free_fulltext_url' AS best_open_url,
            response_jsonb->>'year' AS year,
            response_jsonb->>'found_hybrid' AS found_hybrid,
            response_jsonb->>'found_green' AS found_green,
            response_jsonb->>'error' AS error,
            response_jsonb->>'is_boai_license' AS is_boai_license,
            replace(api->'_source'->>'journal', '
', '') AS journal,
            replace(api->'_source'->>'publisher', '
', '') AS publisher,
            api->'_source'->>'title' AS title,
            api->'_source'->>'subject' AS subject,
            response_jsonb->>'license' AS license
        FROM pub where id in (select id from {table_name})""".format(
        table_name=table_name)

    # if job_type:
    #     command_with_hybrid = command.replace("response_jsonb", "response_with_hybrid").replace("export_queue", "export_queue_with_hybrid")
    run_sql(db, command)

    # they are already lowercased

    logger.info(u"add_dois_to_queue_from_query done in {} seconds".format(elapsed(start, 1)))
    print_status(job_type)

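# The workers that drain this table live elsewhere (see jobs.py and the
# update_registry entries); the queue table itself only holds id, rand,
# started, and finished. As a rough sketch, under the assumption that each
# dyno claims a batch of unstarted rows ordered by the random column (which is
# what the partial indexes above appear to be built for), the claim query
# would look something like:
#
#   update doi_queue
#   set started = now()
#   where id in (
#       select id from doi_queue
#       where started is null
#       order by rand
#       limit 25)
#   returning id;
#
# The batch size of 25 is made up for illustration.
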
def run(parsed_args, job_type):
    start = time()
    if job_type in ("normal", "hybrid"):
        update = update_registry.get("Pub."+process_name(job_type))
        if parsed_args.doi:
            parsed_args.id = clean_doi(parsed_args.doi)
            parsed_args.doi = None
    else:
        update = update_registry.get("DateRange.get_unpaywall_events")
        # update = update_registry.get("DateRange.get_pmh_events")
    update.run(**vars(parsed_args))

    logger.info(u"finished update in {} seconds".format(elapsed(start)))

    resp = None
    if job_type in ("normal", "hybrid"):
        my_pub = Pub.query.get(parsed_args.id)
        resp = my_pub.response_jsonb
        pprint(resp)

    return resp

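# A few illustrative invocations (the flags are the ones defined below; the
# DOIs and filenames here are made up):
#   python queue_separate_table.py --doi=10.1234/example      # refresh one DOI and pprint its response
#   python queue_separate_table.py --addall --where="response_jsonb->>'oa_status'='green'"
#   python queue_separate_table.py --monitor                   # wait for the queue to drain, then scale dynos to 0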
# python queue_separate_table.py --hybrid --filename=data/dois_juan_accuracy.csv --dynos=40 --soup
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
parser.add_argument('--doi', nargs="?", type=str, help="id of the one thing you want to update (case insensitive)")
parser.add_argument('--filename', nargs="?", type=str, help="filename with dois, one per line")
parser.add_argument('--addall', default=False, action='store_true', help="add everything")
parser.add_argument('--where', nargs="?", type=str, default=None, help="""where string for addall (eg --where="response_jsonb->>'oa_status'='green'")""")
parser.add_argument('--hybrid', default=False, action='store_true', help="if hybrid, else don't include")
parser.add_argument('--dates', default=False, action='store_true', help="use date queue")
parser.add_argument('--all', default=False, action='store_true', help="do everything")
parser.add_argument('--week', default=False, action='store_true', help="for the last week")
parser.add_argument('--json', default=False, action='store_true', help="as json not csv")
parser.add_argument('--view', nargs="?", type=str, default=None, help="view name to export from")
parser.add_argument('--reset', default=False, action='store_true', help="do you want to just reset?")
parser.add_argument('--run', default=False, action='store_true', help="to run the queue")
parser.add_argument('--status', default=False, action='store_true', help="to logger.info(the status")
parser.add_argument('--dynos', default=None, type=int, help="scale to this many dynos")
parser.add_argument('--export_with_versions', default=False, action='store_true', help="export the results")
parser.add_argument('--export_no_versions', default=False, action='store_true', help="export the results")
parser.add_argument('--logs', default=False, action='store_true', help="logger.info(out logs")
parser.add_argument('--monitor', default=False, action='store_true', help="monitor till done, then turn off dynos")
parser.add_argument('--soup', default=False, action='store_true', help="soup to nuts")
parser.add_argument('--kick', default=False, action='store_true', help="put started but unfinished dois back to unstarted so they are retried")
parsed_args = parser.parse_args()
job_type = "normal"
if parsed_args.hybrid:
job_type = "hybrid"
if parsed_args.dates:
job_type = "dates"
if parsed_args.filename:
if num_dynos(job_type) > 0:
scale_dyno(0, job_type)
truncate(job_type)
add_dois_to_queue_from_file(parsed_args.filename, job_type)
if parsed_args.addall or parsed_args.where:
if num_dynos(job_type) > 0:
scale_dyno(0, job_type)
add_dois_to_queue_from_query(parsed_args.where, job_type)
if parsed_args.soup:
if num_dynos(job_type) > 0:
scale_dyno(0, job_type)
if parsed_args.dynos:
scale_dyno(parsed_args.dynos, job_type)
else:
logger.info(u"no number of dynos specified, so setting 1")
scale_dyno(1, job_type)
monitor_till_done(job_type)
scale_dyno(0, job_type)
export_with_versions(parsed_args.all, job_type, parsed_args.filename, parsed_args.view)
else:
if parsed_args.dynos != None: # to tell the difference from setting to 0
scale_dyno(parsed_args.dynos, job_type)
# if parsed_args.dynos > 0:
# print_logs(job_type)
if parsed_args.reset:
reset_enqueued(job_type)
if parsed_args.status:
print_status(job_type)
if parsed_args.monitor:
monitor_till_done(job_type)
scale_dyno(0, job_type)
if parsed_args.logs:
print_logs(job_type)
if parsed_args.export_with_versions:
export_with_versions(parsed_args.all, job_type, parsed_args.filename, parsed_args.view, parsed_args.week, parsed_args.json)
if parsed_args.export_no_versions:
export_no_versions(parsed_args.all, job_type, parsed_args.filename, parsed_args.view, parsed_args.week, parsed_args.json)
if parsed_args.kick:
kick(job_type)
if parsed_args.id or parsed_args.doi or parsed_args.run:
run(parsed_args, job_type)
# takes 4 hours
# \copy (select response_jsonb from pub) to 'jsonb_export_20180329_113154.jsonl'
# takes 10 mins each
# date; sed -i 's/"publishedVersion"/null/g' jsonb_export_20180329_113154.jsonl; date
# date; sed -i 's/"submittedVersion"/null/g' jsonb_export_20180329_113154.jsonl; date
# date; sed -i 's/"acceptedVersion"/null/g' jsonb_export_20180329_113154.jsonl; date
# date; sed -i 's/\\\\/\\/g' jsonb_export_20180329_113154.jsonl; date
# date; sed -i 's/\n\n/\n/g' jsonb_export_20180329_113154.jsonl; date
# takes 40 minutes
# date; gzip -c jsonb_export_20180329_113154.jsonl > jsonb_export_20180329_113154.jsonl.gz; date;
# date; aws s3 cp jsonb_export_20180329_113154.jsonl.gz s3://unpaywall-data-updates/jsonb_export_20180329_113154.jsonl.gz --acl public-read; date;