-
Notifications
You must be signed in to change notification settings - Fork 0
/
picman.py
1848 lines (1672 loc) · 62.4 KB
/
picman.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/python3.9
# picman.py
# Picture Manager: process image descriptors, rename, create thumbs
# Utilies used: jhead
# Version 07/18/2011
# Version 08/02/2011: introduced thumb control
# Version 08/15/2011: minor fix: process file names with different capitalization
# Version 11/12/2011: introduced file rename
# Version 02/18/2012: thumb alignment is always done internally using PIL
# Version 11/06/2012: introduced jpg comments from XPTitle field
# Version 12/23/2012: set mod times from creation times if no EXIF
# Version 12/31/2012: only centered thumbs are prepared
# Version 01/03/2013: use PIL to extract jpg comments and DateTimeOriginal; added -bg option
# Version 01/10/2013: fixed PIL/EXIF processing exceptions
# Version 01/25/2013: added processing of json descriptors *.dscj.txt
# Version 02/06/2013: new/updated *.dscj.txt descriptor is always regrouped
# Version 02/20/2013: enabled argparse, removed obsolete options
# Version 03/24/2013: introduced JsondscSplitLastGroup
# Version 06/01/2013: renamed to picman, modified -jn to specify the descriptor name
# Version 06/04/2013: added image renumbering
# Version 06/11/2013: thumbs recreated after image renumbering
# Version 10/25/2013: use Picasa/IPTC captions, abandon XPTitle field
# Version 10/29/2013: fixed unnecessary warnings for IPTC captions
# Version 04/22/2014: update get json descriptor
# Version 06/15/2014: implemented renaming in place (when certain file names don't change)
# Version 06/06/2015: introduced utf8
# Version 07/16/2016: introduced processing of Picasa-generated index
# Version 07/23/2016: instead of old JsondscSplitLastGroup logic, empty captions are made " ".
# now -jun produces the correct image groups
# enable full Picasa processing of IPTC captions
# Version 09/05/2016: rename modified to order dated images properly
# Version 06/19/2017: added notes processing
# Version 07/09/2017: added -mvd nand to get rid of non-alphanum characters in file names
# Version 07/30/2017: added -jue. Now only -jue produces envelope around json desc in *.dscj.txt
# To simplify correcting JSON syntax, -jn, -ju, -jun produce pure JSON in *.dscj.txt
# Version 11/18/2017: use split() instead of beautifulsoup to extract jpg's from Picasa index
# Version 11/19/2017: Win7 / CentOS version
# Version 03/25/2018: changed CLI to use pre-existing *.dscj.txt
# path arg retired
# now -mvc implements nand funcionality
# Version 03/30/2018: added -mvt option and renameExifTime()
# Version 07/01/2018: added getting notes from pre-existing *.dscj.txt
# Version 08/11/2018: added setDesc() to create descriptor if non-existent
# Version 10/22/2018: process .*jpg files
# Picasa index.html ===> index.bak
# Version 12/17/2018: update utf8()
# Version 08/19/2019: begin enabling gps captions
# Version 09/04/2019: disable IPTC warnings
# Version 09/15/2019: gps captions ver 1: introduced *.gps.txt, *.gps, .htm descriptors
# Version 09/21/2019: gps update / fix: -gpsn, -ghpsg, -gpsu
# -mvt removed, use -mvd instead
# Version 10/06/2019: restored -mvt using jhead. It is needed to merge images coming from multiple cameras
# added rmGpsDesc() to remove gps descriptors after image rename
# Version 11/23/2019: use only 240*240 thumbs
# Version 01/08/2020: now runs both in Python 2.7 and 3.8.0
# Version 02/02/2020: include gps descriptor in *.dscj
# Version 08/09/2020: -gpsn fixes for 3.8 compatability
# Version 09/27/2020: make *.gps.htm responsive
# Version 10/04/2020: make *.dscj.txt, *.dsc.htm responsive
# Version 10/27/2020: added picDir to dscj
# Version 03/14/2021: enable cp2ftp()
# Version 03/22/2021: use encoding='utf8' for open, close
# Version 04/07/2021: enable getGpsTzDt(). Now tzdt is determined by date, lat, lon in csv file
# Use json tag "tzdt" instead of "dst"
# Version 05/06/2021: use exifGet() to get exif info
# enable camera, lens info for -gpsg
# Version 05/08/2021: fix -jnt
# Version 05/10/2021: enable fromFtp()
# Version 06/01/2021: now mvt adds image number in the end. This helps to id images easily
# Version 06/14/2021: enable -gpsg -pv to preview images without IPTC info
# Version 07/24/2021: enable -cr2 to rename cr2 images. Introduce cr2 descriptor.
# Version 07/26/2021: now -gpsg accepts images with empty IPTC
# Version 12/21/2021: always use os.system() to call jhead
# enable EXIF FNumber for -gpsg
# Version 03/16/2022: enable cr2MarkUnused()
# Version 04/04/2022: fix date format for -gpsg -pv
# fix os.rename() issue in -cr2
# Version 05/16/2022: disable saving cr2 descriptor to ascii file
# Version 08/13/2022: enable movePicasaIndex()
# Version 08/19/2022: enable loadNotes(). When notes are prepared separately, load them to notes item
# Version 09/03/2022: use -jnb instead of -jnt with descriptor *.body.txt
#----------------------------------------------------------------------------------------------------------
import sys
import os, platform, glob, json, copy, re, uuid
import shutil
import argparse
import time
from time import sleep
from datetime import datetime, timedelta
import tzdata
from tzwhere import tzwhere
from zoneinfo import ZoneInfo
from builtins import str
import pprint
import csv
import base64
import exifread
# Select the IPTC/Pillow/validators imports by interpreter major version.
# Import failures are captured in pyImport rather than raised here —
# presumably reported later by the caller; confirm against main().
pyVer = platform.python_version()
pyImport = ""
if pyVer[0]<="2":
    # Python 2 flavor of the IPTC library
    try:
        from iptcinfo import IPTCInfo
        from PIL import Image
        import validators
    except Exception as e: pyImport = str(e)
else:
    # Python 3 flavor
    try:
        from iptcinfo3 import IPTCInfo
        from PIL import Image
        import validators
    except Exception as e: pyImport = str(e)
# How To Install Python 3.9 on Ubuntu 20.04|18.04:
# https://linuxize.com/post/how-to-install-python-3-9-on-ubuntu-20-04/
# When both 3.8 and 3.9 are installed, for 3.9 installation use python3.9 -m pip install x
# For python3:
# pip install iptcinfo3
# pip install validators
# pip install Pillow
# pip install tzdata
# pip install tzwhere
# pip install ExifRead
# For CentOS Python 2.7:
# yum -y install python-pip - if necessary
# yum install jhead
# pip install future
# pip install Pillow
# pip install iptcinfo
# pip install validators
#----------------------------------------------------------------------------------------------------
# All symbols after x'80' => HTML encoding $#xxx;
def utf8(s):
    """Replace every code point above x'7F' in *s* with an HTML numeric
    character reference (&#NNN;).

    Accepts bytes-like input (anything with .decode, e.g. Python 2 str):
    it is UTF-8-decoded then ASCII-encoded with xmlcharrefreplace and
    returned as bytes.  Text input is escaped and returned as str.

    Note: the parameter was renamed from ``str`` to avoid shadowing the
    builtin; callers pass it positionally.
    """
    if hasattr(s, "decode"):  # bytes-like (Python 2 str / Python 3 bytes)
        return s.decode('utf-8').encode('ascii', 'xmlcharrefreplace')
    return s.encode('ascii', 'xmlcharrefreplace').decode('utf-8')
#----------------------------------------------------------------------------------------------------------
# Prepare thumb by resizing the image and
# placing it in the center of properly colored square
def ThumbC(imgI, Tsize, bgColor):
    """Create a centered square thumbnail for image file imgI.

    The image is shrunk to fit a Tsize x Tsize square and pasted centered
    onto a bgColor background.  The output is written next to the input
    as <name>_t.jpg when Tsize==120, <name>__t.jpg otherwise.
    Exits the process on any Pillow failure.
    """
    th = "_t"
    if (Tsize!=120): th = "__t"
    # Preserve the capitalization of the extension in the output name
    if (imgI.find(".jpg")>0): imgO = imgI.replace(".jpg", th + ".jpg")
    else: imgO = imgI.replace(".JPG", th + ".JPG")
    print ("picman: %s=>%s" % (imgI, imgO))
    try:
        blank = Image.new('RGB', (Tsize, Tsize), bgColor)
        img = Image.open(imgI)
    except Exception as e:
        # fixed typo in the original message ("cant't")
        print ("ThumbC(): can't use Pillow Image: %s" % (str(e)))
        sys.exit()
    width, height = img.size
    # Scale the longer side to Tsize; BOX centers the shorter side
    if (width>=height):
        THUMB_SIZE = (Tsize, (Tsize*height)//width)
        BOX = (0, (Tsize - THUMB_SIZE[1])//2)
    else:
        THUMB_SIZE = ((width * Tsize)//height, Tsize)
        BOX = ((Tsize - THUMB_SIZE[0])//2, 0)
    step = 0  # records which Pillow call failed, for the diagnostic below
    try:
        img.thumbnail(THUMB_SIZE)
        step = 1
        blank.paste(img, BOX)
        step = 2
        blank.save(imgO)
    except Exception as e:
        print ("ThumbC(): failed step %d - %s" % (step, str(e)) )
        sys.exit()
    return
#----------------------------------------------------------------------------------------------------------
# Try to get Picasa/IPTC captions for jpg's with empty comments in List
def checkCaptions(List):
    """For every [fname, comment] pair in List, prefer the Picasa/IPTC
    caption when present; map an empty result to a single blank so
    downstream code never sees "".  Mutates and returns List.
    """
    for item in List:
        iptc = iptcGet(item[0])[0]
        curr = item[1]
        if (iptc != " "):
            curr = iptc   # IPTC data wins over the jpg comment
        if (curr == ""):
            curr = " "    # blank instead of empty
        item[1] = curr
    return List
def getimage(fname):
    """Restore fname into the current directory from ./bak when it is
    missing here and a backup copy exists (metadata kept via copy2)."""
    bak = "./bak/" + fname
    if os.path.exists(fname) or not os.path.exists(bak):
        return
    shutil.copy2(bak, "./")
    print ("getimage(): %s copied" % (fname))
    return
#----------------------------------------------------------------------------------------------------------
# Regroup L:
# put in each row of L maximum number of comment-pic groups,
# so that each row has no more than MaxNPics pics.
# Returns the rearraged list and its # emty slots
def JsondscRegroup(L, MaxNPics):
    """Repack the rows of L so each output row holds at most MaxNPics pics.

    L is a list of rows; within a row, a non-".jpg" string starts a
    comment group and the ".jpg" names that follow belong to it.
    Returns [LOut, NEmpty]: the regrouped row list and the number of
    empty layout slots (MaxLen*rows - pics).
    """
    # Prepare list of comment-iems groups
    groups = []
    for row in L:
        #print ("row=" + str(row))
        curr = []
        for el in row:
            # a new caption closes the current group
            if (not el.endswith(".jpg") and len(curr)>0):
                groups.append(curr)
                curr = []
            curr.append(el)
        if (len(curr)>0): groups.append(curr)
    LOut = []
    Out = []          # row being accumulated
    NPics = 0         # total pics seen
    OutNPics = 0      # pics in the accumulating row
    MaxLen = MaxNPics # widest row produced (oversized groups can exceed MaxNPics)
    for gr in groups:
        #print ("MaxNPics: %s OutNPics: %s %s" % (MaxNPics, OutNPics, str(gr)))
        NPics = NPics + len(gr)-1        # len(gr)-1 pics per group (head is the caption)
        if (OutNPics>=MaxNPics):         # current row is full: flush it
            LOut.append(Out)             # LOut <= Out
            Out = []
            OutNPics = 0
        if (OutNPics+len(gr)-1<=MaxNPics):   # group fits in the current row
            Out = Out + gr               # Out <= gr
            OutNPics = OutNPics + len(gr)-1
            continue
        if (len(gr)-1>=MaxNPics and OutNPics==0):  # oversized group, empty row: own row
            LOut.append(gr)              # LOut <= gr
            MaxLen = max(MaxLen, len(gr)-1)
            continue
        if (len(gr)-1>=MaxNPics and OutNPics>0):   # oversized group: flush row, then own row
            LOut.append(Out)
            Out = []
            OutNPics = 0
            LOut.append(gr)
            MaxLen = max(MaxLen, len(gr)-1)
            continue
        # group doesn't fit but isn't oversized: flush row, start a new one with gr
        LOut.append(Out)                 # LOut <= Out
        MaxLen = max(MaxLen, len(gr)-1)
        OutNPics = len(gr)-1
        Out = gr
    if (len(Out)>1):                     # flush the trailing partial row
        LOut.append(Out)
    NEmpty = MaxLen*len(LOut)-NPics
    print ("JsondscRegroup(): MaxNPics=%s MaxLen=%s NEmpty=%s" % (MaxNPics, MaxLen, NEmpty))
    return [LOut, NEmpty]
#----------------------------------------------------------------------------------------------------------
# Choose regrouping with minimal NEmpty
def JsondscRegroupMin(Rows, MaxNPics):
    """Regroup Rows with row widths MaxNPics, MaxNPics-1, MaxNPics-2
    and return the layout that leaves the fewest empty slots."""
    best_empty = 1000
    best = []
    for delta in (0, 1, 2):
        layout, empty = JsondscRegroup(Rows, MaxNPics - delta)
        if (empty < best_empty):
            best_empty = empty
            best = layout
    return best
#----------------------------------------------------------------------------------------------------------
# *.body.txt => *.dscj.txt
# Convert old descriptors to JSON
def body2dscj(fn, MaxNPics, getimages):
    """Convert an old ASCII descriptor *.body.txt into JSON *.dscj.txt.

    Lines of the form "comment: pic pic ..." become comment-pics rows;
    header lines with no pics become notes pairs.  Rows are regrouped
    with JsondscRegroupMin() and written to <root>.dscj.txt.
    getimages is kept for interface compatibility (unused here).
    """
    # 'with' closes the file on all paths (original left it open)
    try:
        with open(fn, "r", encoding='utf8') as F:
            L = F.readlines()
    except Exception as e:
        # original referenced undefined name 'dscname' here
        print ("body2dscj(): failed to read %s - %s" % (fn, str(e)))
        return
    if (len(L)==0):
        # original referenced undefined name 'fname' here
        print ("body2dscj(): empty " + fn)
        return
    # ASCII descriptor => list of comment-pics rows
    Rows = []
    header = []
    for el in L:
        # header/notes line: no pics, no brackets
        if (not ".jpg" in el and not "[" in el and not "]" in el):
            header += [el]
        if (not ".jpg" in el): continue
        el = el.replace("]", "")
        el = el.replace("[", "")
        el = el.replace("_t.jpg", ".jpg")
        el = el.replace("http://images/", "")
        comment = ""
        if (el.find(":")>=0):
            tmp = el.split(":")
            comment = tmp[0]
            pics = tmp[1]
        else: pics = el
        row = [comment] + pics.split()
        Rows.append(row)
    # header lines => [text, url] notes pairs
    notes = []
    for el in header:
        if (el.count(":")==0):
            notes.append([el.strip(), ""])
            continue
        if (el.count(":")==1):
            notes.append(["", el.strip()])
            continue
        el = el.replace("://", "///")   # protect the URL scheme from the split
        el = el.split(":")
        el[1] = el[1].replace("///", "://").strip()
        notes.append(el)
    if (len(notes)==0): notes = [["", ""], ["", ""], ["", ""]]
    LOut = JsondscRegroupMin(Rows, MaxNPics)
    # Prepare JSON descriptor file
    root = fn.replace(".body.txt", "")
    Out = {"picDir": root, "notes": notes, root: LOut}
    fn = fn.replace(".body.txt", ".dscj.txt")
    desc = json.dumps(Out, indent=1, sort_keys=True)
    try:
        with open(fn, "w", encoding='utf8') as f:
            f.write(utf8(str(desc)))
    except Exception as e:
        print ("body2dscj(): failed to write %s - %s" % (fn, str(e)))
        return
    return
#----------------------------------------------------------------------------------------------------------
# Get comments from jpg files and their IPTC.Caption's
# Create json descritor *.dscj.txt
def GetJpgComments(descname, List, MaxNPics, getimages):
    """Collect comments/captions from the jpg files in List and build the
    JSON descriptor <descname>.dscj.txt, then process it immediately.

    Returns the number of images processed, or None when List yields
    nothing.  Pre-existing notes in an existing descriptor are kept.
    """
    Res = []
    # get comments from jpg files in List
    for fname in List:
        if fname.lower().endswith("_t.jpg"): continue # ignore thumbs
        try:
            # .app holds the JPEG application segments, incl. the COM marker
            app = Image.open(fname).app
        except:
            print ("GetJpgComments(): Failed to process %s" % (fname))
            exit(0)
        comment = ""
        if ("COM" in app):
            comment = ""
            try:
                comment = app["COM"].decode('utf-8') # needed for python 3
            except Exception as e:
                print ("GetJpgComments(): Failed to to get comment from %s - %s" % (fname, str(e)))
            comment = comment.replace("\x00", "")
        Res.append([fname, comment])
    if len(Res)==0:
        return
    Res = checkCaptions(Res)
    # prepare the descriptors
    #print ("=>" + str(Res))
    Out = ""      # legacy flat-text form of the descriptor
    LOut = []     # list of [caption, pic, pic, ...] groups
    Curr = []
    for el in Res:
        fname = el[0]
        el[0] = el[0].replace(".jpg", "_t.jpg")
        if el[1]!="":
            # a caption starts a new group: flush the current one
            if (len(Curr))>0: LOut.append(Curr)
            Curr = []
            try:
                Out = Out + "\n" + el[1] + ": http://images/" + el[0]
                Curr = [el[1], fname]
            except:
                # caption contains characters the text form can't take
                Out = Out + "\n : http://images/" + el[0]
                Curr = [el[1], fname]
                print ("GetJpgComments(): Wrong symbol in %s comment" % (fname))
        else:
            Out = Out + " http://images/" + el[0]
            Curr.append(fname)
    Out = Out[1:] + "\n"
    if (len(Curr)>0): LOut.append(Curr)
    LOut1 = JsondscRegroupMin(LOut, MaxNPics)
    # Prepare the descriptors
    LOut = {descname : LOut1, "picDir": descname}
    LOut["notes"] = [["", ""], ["", ""]]
    # try to get notes from pre-existing desc
    fs = 0
    tmp = {}
    try:
        fs = os.path.getsize(descname + ".dscj.txt")
    except: pass
    if (fs>0): tmp = JsonDscGet(descname + ".dscj.txt")
    if ("notes" in tmp):
        LOut["notes"] = copy.deepcopy(tmp["notes"])
        print ("GetJpgComments(): got pre-existing notes")
    #json.dump(LOut, open(descname + ".dscj.txt", "w"), indent=1, sort_keys=True, encoding ="latin1")
    json.dump(LOut, open(descname + ".dscj.txt", "w", encoding='utf8'), indent=1, sort_keys=True)
    JsonDscProcs(descname + ".dscj.txt", 0, getimages, None) # process new descriptor immediately
    return len(Res)
#----------------------------------------------------------------------------------------------------------------\
# JSON row => HTML table
def JsonRowProcs(row, getimages):
    """Render one descriptor row as two HTML tables.

    row is a flat list [caption, pic, ..., caption, pic, ...].  Returns
    [Res_norm, Res_view]: the "norm" table links into ./images/, the
    "view" table links to files in the current directory.  When
    getimages is true, missing images are restored from ./bak.
    """
    # a row may start directly with a picture: give it an empty caption
    if (row[0].endswith(".jpg")): row = [""] + row
    # Prepare the list of comment-items groups
    groups = []
    curr = []
    for el in row:
        if (not el.endswith(".jpg") and len(curr)>0):
            groups.append(curr)
            curr = []
        curr.append(el)
    if (len(curr)>0): groups.append(curr)
    # Prepare HTML table for this JSON row
    cell_size = 120
    anormfmt = "<a target=win_link href=./images/%s.jpg><img class=th_small src=./images/%s__t.jpg></a>"
    aviewfmt = "<a target=win_link href=./%s.jpg><img class=th_small src=./%s__t.jpg></a>"
    tdheadfmt = "<td id=tdc colspan=%s width=%s>%s</td>\n"
    tdmainfmt = "<td id=tdi>%s</td>\n"
    trfmt = "<tr>%s</tr>\n"
    tablefmt = "<table id=tabi>\n%s%s</table>\n"
    Res_norm = ""
    Res_view = ""
    header = ""
    main_norm = ""
    main_view = ""
    for gr in groups:
        ncols = len(gr)-1    # pics in this group (head is the caption)
        gr = procGroup(gr)   # may turn the caption into a Google Maps link
        if (not gr): continue
        for el in gr:
            # prepare header and main rows
            if (not el.endswith(".jpg")):
                # caption cell spans all of this group's pics
                header = header + tdheadfmt % (ncols, ncols*cell_size, el)
                continue
            if (getimages): getimage(el)
            el = el.replace(".jpg", "")
            anorm = anormfmt % (el, el)
            aview = aviewfmt % (el, el)
            main_norm = main_norm + tdmainfmt % (anorm)
            main_view = main_view + tdmainfmt % (aview)
    header = trfmt % (header)
    main_norm = trfmt % (main_norm)
    main_view = trfmt % (main_view)
    Res_norm = Res_norm + tablefmt % (header, main_norm)
    Res_norm = Res_norm.replace("\n</tr>", "</tr>")
    Res_view = Res_view + tablefmt % (header, main_view)
    Res_view = Res_view.replace("\n</tr>", "</tr>")
    return [Res_norm, Res_view]
#----------------------------------------------------------------------------------------------------------------\
# Get json descriptor from the given file and return the dict
def JsonDscGet(fname):
    """Read the json descriptor from fname and return it as a dict.

    The descriptor may be bare JSON or wrapped in an HTML comment
    envelope "<!--dscj ... -->".  Returns {} on any error.
    """
    try:
        # 'with' guarantees the handle is closed even when parsing fails
        # (the original leaked it: F.close() was after json.loads)
        with open(fname, "r", encoding='utf8') as F:
            F_ = " " + utf8(F.read()) + " "
        # strip the optional <!--dscj ... --> envelope
        if ("<!--dscj" in F_):
            F_ = F_.split("<!--dscj")
            if ("-->" in F_[1]): F_ = F_[1].split("-->")
            else: F_ = F_[1].split("->")
        else: F_ = [F_]
        IN = json.loads(F_[0])
    except Exception as e:
        print ("JsonDscGet(): Wrong %s - %s" % (fname, str(e)))
        return {}
    return IN
#----------------------------------------------------------------------------------------------------------------
def loadNotes(fnname):
    """Extract [text, url] notes pairs from the text that precedes the
    "<!--dscj" envelope in file fnname.

    Returns [["", ""]] when the file can't be read or has no envelope.
    Fix: the original body referenced undefined name 'fname' (the
    parameter is fnname), raising NameError both in the open() call and
    again inside the except handler's message.
    """
    res = [["", ""]]
    try:
        with open(fnname, "r", encoding='utf8') as F:
            F_ = utf8(F.read())
        if (not "<!--dscj" in F_): return res
        F_ = F_.split("<!--dscj")[0].strip().split("\n")
    except Exception as e:
        print ("loadNotes(): Can't get %s - %s" % (fnname, str(e)))
        return res
    res = []
    http = "http://"
    for el in F_:
        if (": http://" in el): el = el.split(": http://")
        else:
            # NOTE(review): this switches the prefix to https even for
            # lines with no URL at all — preserved as-is
            http = "https://"
            el = el.split(": https://")
        if (len(el)==1): res.append([el[0], ""])
        else: res.append([el[0], http + el[1]])
    return res
#----------------------------------------------------------------------------------------------------------------\
# Update *.dscj.txt to include HTML tables
# Create *.dscj.htm to view the images in the current directory
def JsonDscProcs(fname, MaxNPics, getimages, env):
    """Rebuild <picDir>.dscj.txt (optionally with an HTML envelope when
    env is true) and create <picDir>.dsc.htm for viewing the images.

    MaxNPics > 0 forces a regrouping of the descriptor rows.
    """
    IN = JsonDscGet(fname)
    if (IN=={}): return
    NOTES = [["", ""]]
    if ("notes" in IN):
        NOTES = IN["notes"]
        del IN["notes"]
    else: NOTES = loadNotes(fname)
    GPS = None
    if ("gps" in IN):
        GPS = IN["gps"]
        del IN["gps"]
    # the gps descriptor is re-read from disk each run; the stored "gps"
    # entry above is discarded either way — TODO confirm this is intended
    gpsd = getGpsDesc()
    if (gpsd): GPS = gpsd
    if (not gpsd): GPS = None
    INkey = None
    if ("picDir" in IN):
        INkey = IN["picDir"]
    ink = list(IN.keys())
    if (INkey==None): INkey = ink[0]   # fall back to the first key
    IN1 = IN[INkey]
    if (MaxNPics>0):
        IN1 = JsondscRegroupMin(IN1, MaxNPics)
    Res_norm = ""
    Res_view = ""
    for row in IN1:
        [norm, view] = JsonRowProcs(row, getimages)
        Res_norm = Res_norm + norm
        Res_view = Res_view + view
    Res_notes = notesProcs(NOTES)
    # Write .dscj.txt
    IN1 = {INkey: IN1, "picDir": INkey}
    if (NOTES): IN1["notes"] = NOTES
    if (GPS): IN1["gps"] = GPS
    print ("JsonDscProcs(): env=" + str(env))
    jdump = json.dumps(IN1, indent=1, sort_keys=True)
    spanId = "<span id=\"picDir=%s\"/>" % (INkey)
    if (env):
        # envelope form: notes text, then JSON inside an HTML comment, then tables
        Res_norm = "<!--%s\n%s\n-->\n%s\n%s\n" % ("dscj", jdump, spanId, Res_norm)
        Res_norm = Res_notes.replace("<br>", "") + Res_norm
    else:
        Res_norm = jdump   # pure JSON, easier to hand-edit
    fname_norm = INkey + ".dscj.txt"
    try:
        F = open(fname_norm, "w", encoding='utf8')
        F.write(utf8(Res_norm))
        F.close()
    except Exception as e:
        print ("JsonDscProcs(): failed to write %s - %s" % (fname_norm, e))
    # Write .dscj.htm (wrapped in the picman.htm template next to this script)
    fmt = ""
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    fmtfile = scriptdir.replace("\\", "/") + "/picman.htm"
    try:
        F = open(fmtfile, encoding='utf8')
        fmt = F.read()
        F.close()
    except:
        print ("JsonDscProcs(): failed to read " + fmtfile)
        return
    # NOTE(review): the .htm envelope dumps IN (not IN1), i.e. the
    # descriptor without notes/gps — confirm intentional
    Res_view = "<!--%s\n%s\n-->\n%s\n%s" % ("dscj", json.dumps(IN, indent=1, sort_keys=True), spanId, Res_view)
    if (fmt!=""): Res_view = fmt % (INkey, INkey, Res_view)
    fname_view = INkey + ".dsc.htm"
    try:
        F = open(fname_view, "w", encoding='utf8')
        F.write(utf8(Res_view))
        F.close()
    except Exception as e:
        print ("JsonDscProcs(): failed to write %s - %s" % (fname_view, str(e)))
        return
    print ("JsonDscProcs(): %s, %s created" %(fname_norm, fname_view))
    return
#----------------------------------------------------------------------------------------------------------------
# Check and process the notes pairs
def notesProcs(IN):
    """Validate the notes and render them as HTML "<br>"-separated lines.

    IN must be a list of [text, url] pairs (url may be "").  Returns the
    rendered string, or "" when IN is malformed (a diagnostic is printed).
    """
    # Check that this is a list of [string, string] pairs
    if (not IN.__class__.__name__=="list"):
        print ("notesProcs(): Wrong notes in %s" % (IN))
        pprint.pprint(IN)
        return ""
    for el in IN:
        if (not el.__class__.__name__=="list"):
            print ("notesProcs(): Wrong note %s" % (el))
            pprint.pprint(el)  # fix: original used undefined name 'el_'
            return ""
        if (len(el)!=2):
            print ("notesProcs(): Wrong note %s" % (el))
            pprint.pprint(el)
            return ""
        # 'unicode' only exists on Python 2; the extra check is harmless on 3.
        # (renamed from 'str', which shadowed the builtin)
        is_str = el[1].__class__.__name__=="str" or el[1].__class__.__name__=="unicode"
        # NOTE(review): 'and' accepts pairs where only one element is a
        # string — possibly 'or' was intended; behavior preserved
        if (el[0].__class__.__name__!="str" and not is_str):
            print ("notesProcs(): Wrong note (%s, %s) in %s" % (el[0].__class__.__name__, el[1].__class__.__name__, el))
            pprint.pprint(el)
            return ""
        if (not el[1]=="" and not validators.url(el[1])):
            print ("notesProcs(): Wrong note [%s, %s]" % (el[0], el[1]))
            return ""
    res = ""
    for el in IN:
        if (el[0]=="" and el[1]==""): continue
        if (el[0]==""):
            res = res + el[1] + "<br>\n"
            continue
        if (el[1]==""):
            res = res + el[0] + "<br>\n"
            continue
        res = res + el[0] + ": " + el[1] + "<br>\n"
    print ("notesProcs(): %d notes processed" % (len(IN)))
    return res
#----------------------------------------------------------------------------------------------------------------
# Put comments into jpg's for this JSON descriptor using jhead
def JsondscPutComments(fname):
    """Write each caption from descriptor fname into its group's first
    jpg (via jhead) and into IPTC (via iptcSet); pics after the first in
    a group get a blank comment.  Stops on the first missing file.
    """
    IN = JsonDscGet(fname)
    INkeys = list(IN.keys())
    if ("notes" in INkeys):
        INkeys.remove("notes")
    if ("gps" in INkeys):
        INkeys.remove("gps")
    INkey = None
    if ("picDir" in INkeys):
        INkey = IN["picDir"]
    # without picDir, exactly one key (the root) must remain
    if (INkey==None and len(INkeys)!=1):
        print ("JsondscPutComments(): wrong keys in desc")
        pprint.pprint(INkeys)
        return
    if (INkey==None): INkey = INkeys[0]
    IN = IN[INkey]
    N = 0
    comment = " "
    for row in IN:
        for fn in row:
            if (not fn.endswith(".jpg")):
                comment = fn   # caption applies to the next jpg
                continue
            if not os.path.exists(fn):
                print ("JsondscPutComments(): stop - %s not found" % (fn))
                return
            # NOTE(review): comment/fn are interpolated into a shell
            # command — shell metacharacters in a caption would be
            # executed; consider subprocess.run([...], shell=False)
            cmd = "jhead -cl \"%s\" %s > nul" % (comment, fn)
            os.system(cmd)
            iptcSet(fn, comment, None)
            N = N + 1
            comment = " "   # only the first jpg of a group gets the caption
    print ("JsondscPutComments(): %s images processed with jhead" % (N))
    return
#----------------------------------------------------------------------------------------------------------------\
# Renumber the images in fname and recreate it
def JsondscRenum(fname):
    """Renumber the images listed in descriptor fname to <picDir>.NNN.jpg
    (in descriptor order) and rewrite the descriptor as one flat group.

    Returns the sorted list of new image names, or [] on failure.
    Thumbs are removed so they can be recreated for the new names.
    """
    IN = JsonDscGet(fname)
    if (not "picDir" in IN):
        print ("JsondscRenum(%s): no picDir" % (fname))
        return []
    INkey = IN["picDir"]
    print ("JsondscRenum(): %s, %s" % (fname, INkey))
    NOTES = []
    if ("notes" in IN):
        NOTES = IN["notes"]
        del IN["notes"]
    IN = IN[INkey]
    Pics = []
    OUT = []      # flattened descriptor items (captions + pics, in order)
    Wrong = []    # non-existent pictures if any
    for group in IN:
        for el in group:
            OUT.append(el)
            if (el.endswith(".jpg")):
                if (not os.path.exists(el)): Wrong.append(el)
                Pics.append(el)
                el_t = el[0:len(el)-4] + "_t.jpg" #remove thumbs
                el__t = el[0:len(el)-4] + "__t.jpg"
                if (os.path.exists(el_t)): os.remove(el_t)
                if (os.path.exists(el__t)): os.remove(el__t)
    if (len(Wrong)>0):
        print ("JsondscRenum() failed. The following files do not exist: %s" % (str(Wrong)))
        return []
    # renumber the files: rename to a uuid prefix first so the second
    # rename to INkey.NNN can't collide with existing names
    prefix = str(uuid.uuid4())
    rename(False, prefix, Pics)
    List = glob.glob(prefix + ".*.jpg")
    List.sort()
    rename(False, INkey, List)
    List = glob.glob(INkey + ".*.jpg")
    List.sort()
    Pics = copy.deepcopy(List)
    # prepare the new descriptor: captions kept, pics replaced in order
    RES = []
    for el in OUT:
        if (not el.endswith(".jpg")): RES.append(el)
        else: RES.append(List.pop(0))
    OUT = {}
    OUT[INkey] = [RES]
    if (len(NOTES)>0):
        OUT["notes"] = NOTES
    # NOTE(review): the rewritten descriptor drops the "picDir" entry —
    # downstream JsonDscProcs falls back to the first key; confirm intended
    json.dump(OUT, open(fname, "w", encoding='utf8'), indent=1, sort_keys=True)
    # remove extra images if any (numbers past the renumbered range)
    N = len(Pics) + 1
    while (os.path.exists("%s.%03d.jpg" % (INkey, N))):
        os.remove("%s.%03d.jpg" % (INkey, N))
        N = N + 1
    print ("JsondscRenum(): %s images processed. %s extra images removed" % (len(Pics), N-len(Pics)-1))
    return Pics
#----------------------------------------------------------------------------------------------------------------
def movePicasaIndex():
    """Bring Picasa's index.html for the current directory into the cwd.

    Looks under <picman.picasa env var>/<cwd basename>/index.html.
    Returns True when index.html is already present or was moved in,
    False otherwise.
    """
    if os.path.exists("index.html"):
        return True   # already in place
    picasaDir = os.getenv("picman.picasa", None)
    if not picasaDir:
        print("movePicasaIndex(): picman.picasa not set")
        return False
    cwdName = os.getcwd().replace("\\", "/").split("/")[-1]
    pIndex = "%s/%s/index.html" % (picasaDir, cwdName)
    if not os.path.exists(pIndex):
        print("movePicasaIndex(): no " + pIndex)
        return False
    shutil.move(pIndex, ".")
    print ("movePicasaIndex(): moved " + pIndex)
    return True
#----------------------------------------------------------------------------------------------------------------
def procPicasaIndex():
    """Extract the ordered jpg list from a Picasa-generated index.html.

    On success the index is renamed to index.bak and the list of image
    names (with "_" mapped back to ".") is returned; on any problem an
    empty list is returned.
    """
    pIndex = "index.html"
    print ("procPicasaIndex(): try using Picasa-generated %s" % (pIndex))
    L = []
    if (not movePicasaIndex()):
        print ("procPicasaIndex(): %s not found" % (pIndex))
        return L
    try:
        F = open(pIndex, "r", encoding='utf8')
        F_ = utf8(F.read()).lower()
        F.close()
        L_ = []
        # split() on "<img " instead of an HTML parser (see 11/18/2017 note)
        if ("img" in F_): L_ = F_.split("<img ")[1:]
        for item in L_:
            if (not "src=" in item): continue
            item = item.split("src=")[1]
            if (not ".jpg" in item): continue
            item = item.split(".jpg")[0] + ".jpg"
            item = item.split("/")[-1]
            # Picasa writes name_NNN.jpg for name.NNN.jpg
            item = item.replace("_", ".")
            # each item must be unique and actually exist here
            ok = not item in L and os.path.exists(item)
            if (not ok):
                L = []
                print ("procPicasaIndex(): wrong item " + item)
                break
            L.append(item)
    except Exception as e:
        L = []
        print ("procPicasaIndex(): wrong %s: %s" % (pIndex, str(e)) )
    if (len(L)==0): print ("procPicasaIndex(): cannot use %s" % (pIndex))
    else:
        # consume the index so it isn't reprocessed next run
        fnbak = pIndex.replace(".html", ".bak")
        if (os.path.exists(fnbak)): os.remove(fnbak)
        os.rename(pIndex, fnbak)
        print ("procPicasaIndex(): %s ===> %s" % (pIndex, fnbak))
    return L
#----------------------------------------------------------------------------------------------------------------\
# Rename files in List to: prefix.nnn[.date].ext
# For prefix = "": replace non-alphanum characters by dots.
def rename(addDate, prefix, List):
    """Rename the files in List to prefix.NNN[.date].ext.

    For prefix == "", non-alphanumeric characters in each name are
    replaced by dots instead.  When List[0] is an .htm file, the ordered
    list is taken from the Picasa index via procPicasaIndex().  If any
    target name already exists, a two-stage rename through uuid-prefixed
    temporary names avoids clobbering.  Returns the number of files
    processed.  Thumbs found in List are deleted, not renamed.
    """
    fname = ""
    if (".htm" in List[0]): fname = List[0]
    if (fname!=""):
        List = procPicasaIndex()
    if (fname!="" and len(List)>0):
        print ("rename(): used %s, %d items" % (fname, len(List)))
    if (len(List)==0):
        print ("rename(): nothing to process")
        return 0
    # Prepare new names
    print ("rename(): %d items to process" % (len(List)))
    InPlace = False
    uid = str(uuid.uuid4()).split("-")[0] + "."   # temp-name prefix for in-place renames
    N = 0
    List_ = []
    for el in List:
        if (el.lower().endswith("_t.jpg")): # no thumbs in the list
            os.remove(el)
            continue
        N = N + 1
        # split off the directory part (el_) and the extension
        el = el.replace("\\", "/")
        el_ = el.split("/")
        el_ = el_[0:len(el_)-1]
        el_ = "/".join(el_)
        if (el_!=""): el_ = el_ + "/"
        ext = el.split(".")
        ext = ext[len(ext)-1]
        now = ""
        if addDate:
            # embed the file's mtime date so images from several cameras sort properly
            nowsec = os.path.getmtime(el)
            now = "." + time.strftime('%Y.%m.%d', time.localtime(nowsec))
        if (prefix!=""): name = el_ + "%s.%.03d%s.%s" % (prefix.lower(), N, now, ext.lower())
        else: name = (re.sub('[^a-zA-Z0-9]', '.', el)).lower()
        if (name.startswith(".")): name = "0" + name
        # a target that already exists forces the two-stage (in place) path
        if (name!=el and os.path.exists(name)):
            InPlace = True
        if (name!=el):
            List_.append([el, name])
    if (InPlace): print ("rename(): in place - stage 1")
    else:
        uid = ""   # direct rename: no temp prefix needed
        print ("rename(): do it")
    List = List_
    # stage 1: rename to (possibly uid-prefixed) target names
    for el in List:
        name = uid + el[1]
        if (os.path.exists(name)): os.remove(name)
        print ("rename(): %s=>%s" % (el[0], name))
        os.rename(el[0], name)
    if (not InPlace): return N
    # stage 2: strip the uid prefix from the temporary names
    print ("rename(): in place - stage 2")
    for el in List:
        if (el[0]==""): continue
        name = el[1]
        tmpname = uid + el[1]
        if (os.path.exists(name)): os.remove(name)
        print ("rename(): %s=>%s" % (tmpname, name))
        # retry once after a pause — the OS may still hold the file briefly
        tryMore = False
        try:
            os.rename(tmpname, name)
        except:
            tryMore = True
        if (tryMore):
            sleep(0.75)
            try:
                os.rename(tmpname, name)
            except:
                print ("rename(): %s=>%s failed 2 times" % (tmpname, name))
        name_t = name[0:len(name)-4] + "_t.jpg" #remove thumbs
        name__t = name[0:len(name)-4] + "__t.jpg"
        if (os.path.exists(name_t)): os.remove(name_t)
        if (os.path.exists(name__t)): os.remove(name__t)
    return N
#----------------------------------------------------------------------------------------------------------
# Group is a list, its head is caption, tail is captioned images
# Find the first image if any with gps info and make caption a hyperlink to Google Maps.
def procGroup(gr):
    """gr = [caption, pic, pic, ...].  If any pic of the group appears in
    gpsDesc, rewrite the caption as a hyperlink to its Google Maps
    location.  Returns gr (gr[0] possibly rewritten)."""
    if (len(gr)<2):
        print ("procGroup(): wrong group " + str(gr))
        return gr
    # lazily initialize the gps cache; bail out if it stays empty
    if ("empty" in gpsDesc): iniGpsDesc()
    if ("empty" in gpsDesc): return gr
    caption = gr[0]
    location = ""
    for pic in gr[1:]:
        if (pic in gpsDesc):
            location = gpsDesc[pic]   # first pic with gps info wins
            break
    if (location==""): return gr
    if (not location.startswith("http")):
        location = "https://www.google.com/maps/?q=%s" % (location)
    if (caption.strip()==""): caption = "map"
    gr[0] = "<a class=butt target=win_link href=%s>%s</a>" % (location, caption)
    return gr
#----------------------------------------------------------------------------------------------------------
# Initialize gpsDesc for procGroup()
# pic-name -> location cache used by procGroup();
# {"empty": 1} marks it as not yet initialized.
gpsDesc = {"empty": 1}
def iniGpsDesc():
    """Fill gpsDesc from the gps descriptor returned by getGpsDesc().

    Each "root" item with last element "y" contributes item[1] -> item[3]
    (presumably file name -> location — confirm against the gps
    descriptor format).  The "empty" marker is dropped once at least one
    real entry exists.
    """
    global gpsDesc
    desc = getGpsDesc()
    # require a complete descriptor: both "tzdt" and "root" present
    if (desc==None or not "tzdt" in desc or not "root" in desc): return
    desc = desc["root"]
    for item in desc:
        if (item[-1]=="y"):
            gpsDesc[item[1]] = item[3]
    k = list(gpsDesc.keys())
    if (len(k)>1): del gpsDesc["empty"]
    #print ("iniGpsDesc()" + gpsDesc)
    print ("iniGpsDesc() %d items in gpsDesc" % (len(k)))
    return
#----------------------------------------------------------------------------------------------------------
def getGpsTzDt(date, lat, lon):
    """Return the UTC offset in whole hours at (lat, lon) on date.

    date is "YYYY-MM-DD"; tzwhere maps the coordinates to an IANA zone
    name and ZoneInfo evaluates the offset on that date (DST included).
    Returns 0 on any failure.
    """
    date = date.split("-")
    date = [ int(date[0]), int(date[1]), int(date[2])]
    tzw = tzwhere.tzwhere()
    tz_str = ""
    tzdt = 0
    try:
        tz_str = tzw.tzNameAt(lat, lon)
        t = datetime(date[0], date[1], date[2], tzinfo=ZoneInfo(tz_str))
        # parse "+HH" out of the "...+HH:MM" suffix of str(t);
        # NOTE(review): minutes are dropped, so half-hour zones (e.g.
        # +05:30) are truncated to the hour — confirm acceptable
        tzdt = int(str(t)[-6:-3])
    except Exception as e:
        print("Failed getGpsTzDt(): error=" + str(e))
        return 0
    print ("getGpsTzDt(): TZ=%s%+d" % (tz_str, tzdt))
    return tzdt
#----------------------------------------------------------------------------------------------------------
# Create json descriptor *.gps.txt with links to Google maps. Use csv files from GPS Logger.
# dst - offset for Zulu, Ljpg - list of jpg's in this dir
def crGpsDesc(Ljpg):
Lcsv = glob.glob("*.csv")
if (len(Lcsv)==0):
print ("crGpsDesc(): no csv to process")
return
res = []
# add image items
for item in Ljpg:
if (item.endswith("_t.jpg")): continue
t = exifGet(item)[0]
if (t==""): continue
t = t.replace(" ", ".")
t = t.replace(":", "")
res.append([t, "", "", item, "jpg"])
if (len(Ljpg)==0):
print ("crGpsDesc(): no jpg to process")
return
for fn in Lcsv:
curr = []
try:
with open(fn, "r", encoding='utf8') as f:
reader = csv.reader(f)
for row in reader: curr.append(row)
except Exception as e:
print ("crGpsDesc(): Failed to read %s - %s" % (fn, str(e)))
continue
if (len(curr)<2 or len(curr[0])<3 or curr[0][0]!="time" or curr[0][1]!="lat" or curr[0][2]!="lon"):
print ("crGpsDesc(): Wrong %s" % (fn))
continue
curr.pop(0)
date, lat, lon = curr[0][0], float(curr[0][1]), float(curr[0][2])
date = date.replace("T", "-")
date = date.replace(":", "-")
date = date.replace(".", "-")