Example #1
def golive(config, shelvedb_paths=None):
    if shelvedb_paths:
        csvfilepath = None
        for shelvedb_path in shelvedb_paths:
            columns = []
            name = shelvedb_path.rpartition('/')[-1]
            for dimension in config.dimensions:
                if name == dimension.name:
                    columns = dimension.all
                    break
            shelveddb = LRUShelve(shelvedb_path, 2000, readonly=True)
            fd, csvfilepath = tempfile.mkstemp(suffix='.csv', prefix=name)
            tmpfile = file(csvfilepath, 'w')
            csvwriter = UnicodeWriter(tmpfile, delimiter='\t')
            for key, rows in shelveddb.iteritems():
                for row in rows:
                    values = []
                    for i in range(len(columns)):
                        value = row[i]
                        if value is None:
                            value = ''
                        values.append(value)
                    csvwriter.writerow(values)
            tmpfile.close()
            os.close(fd)
            config.UDF_pgcopy(name, columns, '\t', None, '', file(csvfilepath))
            shelveddb.close()
            os.remove(csvfilepath)
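
Neither this nor the later examples define UnicodeWriter itself; most of them rely on the UnicodeWriter recipe from the Python 2 csv module documentation (Examples #7-#9 import an equivalent from the unicodecsv package). A minimal sketch of that recipe, for reference:

import csv, codecs, cStringIO

class UnicodeWriter:
    """CSV writer that takes rows of unicode strings and writes them to
    the byte stream f in the requested encoding (Python 2 docs recipe)."""

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        self.queue = cStringIO.StringIO()   # buffers one encoded row at a time
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow([s.encode("utf-8") for s in row])
        data = self.queue.getvalue().decode("utf-8")
        self.stream.write(self.encoder.encode(data))
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)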
Example #2
def write_csv(info_dics, output):
    """
	write infomation to csv
	"""

    with open(output, 'wb') as o:  # the csv machinery expects binary mode on Python 2
        fw = UnicodeWriter(o, encoding="utf-8")
        fw.writerow(info_dics[0].keys())
        for d in info_dics:
            fw.writerow(d.values())
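
Since the header row comes from info_dics[0].keys() and each data row from d.values(), the columns only line up if every dict yields its keys in the same order, which plain Python 2 dicts do not guarantee across differently-built dicts. A hypothetical call that pins the order with OrderedDict:

from collections import OrderedDict

# hypothetical input: OrderedDict fixes the keys()/values() column order
rows = [OrderedDict([(u'name', u'Zoë'), (u'city', u'Köln')]),
        OrderedDict([(u'name', u'Ana'), (u'city', u'Porto')])]
write_csv(rows, 'people.csv')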
Example #3
    def _combine_csv(self, output):

        with open(output, 'wb') as o:  # binary mode for csv on Python 2
            fw = UnicodeWriter(o)
            fw.writerow(table_title)
            for fname in self.files:  # avoid shadowing the file() builtin
                if not fname.endswith('.csv'):
                    continue
                itable = self._read_restruct_csv(fname)
                for i in itable:
                    fw.writerow(i)
Example #4
    def shelved2onlinedb(self):
        if len(self.shelveddb) > 0:
            csvfilepath = None
            curs = None
            try:
                fd, csvfilepath = tempfile.mkstemp(suffix='.csv',
                                                   prefix=self.name)
                tmpfile = file(csvfilepath, 'w')
                csvwriter = UnicodeWriter(tmpfile, delimiter='\t')
                for k in self.shelveddb:
                    csvwriter.writerows(self.shelveddb[k])
                tmpfile.close()
                os.close(fd)
                curs = self.con.cursor()
                curs.execute('TRUNCATE %s' % self.name)
                curs.copy_from(file=file(csvfilepath),
                               table=self.name,
                               sep='\t',
                               null='',
                               columns=self.all)
            finally:
                if curs:
                    curs.close()
                if csvfilepath:  # mkstemp may have raised before assigning it
                    os.remove(csvfilepath)
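
The cursor in this example behaves like a psycopg2 cursor: copy_from streams the temporary CSV into PostgreSQL via COPY. A standalone sketch of the same TRUNCATE-then-COPY pattern (the connection string, table, and column names are illustrative, not from the original code):

import psycopg2

con = psycopg2.connect("dbname=test")
curs = con.cursor()
curs.execute('TRUNCATE mytable')   # illustrative table mytable(a, b)
with open('/tmp/mytable.csv') as f:
    # tab-separated input, empty string as the NULL marker, as above
    curs.copy_from(f, 'mytable', sep='\t', null='', columns=('a', 'b'))
con.commit()
curs.close()
con.close()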
Example #5
        try:
            label = tag[u'android:label']
        except KeyError:
            label = app_label
        if label.startswith(u'@ref/'):
            label = get_label(label[len(u'@ref/'):], pkg, apk)
        name = tag[u'android:name']
        if name.startswith('.'):
            name = pkg + name
        star = apk.split('/')[-1][:-len('.apk')]
        print('[%4d] %s %s "%s"' % (i, star, name, label))
        apps.append([star, name, label])


for apk in glob.glob('../apks/wear/*.apk'):
    i += 1
    run(i, apk)

with open('pkg_wfs_label.csv', 'wb') as f:
    writer = UnicodeWriter(f)
    # writer = csv.writer(f, encoding='utf-8')
    for wf in apps:
        print('[write] %s' % wf)
        writer.writerow(wf)

with open('pkg_wfs_label.csv', 'rb') as f:
    reader = UnicodeReader(f)
    # reader = (f, encoding='utf-8')
    for row in reader:
        print('[read] %s' % row)
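
UnicodeReader is also undefined in this snippet; the companion recipe from the same Python 2 csv documentation, sketched:

import csv, codecs

class UTF8Recoder:
    """Iterator that reads an encoded stream and re-encodes it to UTF-8."""
    def __init__(self, f, encoding):
        self.reader = codecs.getreader(encoding)(f)
    def __iter__(self):
        return self
    def next(self):
        return self.reader.next().encode("utf-8")

class UnicodeReader:
    """CSV reader that yields rows of unicode strings (Python 2 docs recipe)."""
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        self.reader = csv.reader(UTF8Recoder(f, encoding), dialect=dialect, **kwds)
    def next(self):
        return [unicode(s, "utf-8") for s in self.reader.next()]
    def __iter__(self):
        return self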
Example #6
	fs = fields.split("|")
	return set(fs) == set(filter(lambda a: h[a], h.keys()))
	
	
def output_row(h, row):
	rrow = []
	for f in row:
		if f in h:
			rrow.append(h[f])
		else:
			rrow.append("")
	return rrow
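
output_row projects a record dict onto a fixed column order, filling missing fields with empty strings; a quick hypothetical usage:

h = {u'date': u'2013-01-02', u'price': u'9.99'}   # record with no 'fund'
print output_row(h, [u'date', u'fund', u'price'])
# -> [u'2013-01-02', u'', u'9.99']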



ofile = UnicodeWriter(open(files["csv"], "wb"))
for fn in files["in"]:
	try:
		tree = etree.parse(open(fn), etree.HTMLParser())

		data = []
		drows = 0
		props = fnmeta(fn)
		props["file"] = os.path.split(fn)[1]
		for (k, v) in parse["properties"].items():
			a = stringstring(extract(tree, v))
			if a is not None:
				props[k] = a
		#pprint.pprint(props)
		for container in tree.xpath(parse["container"]):
			cdata = []
Example #7
            res.append(r)
        return res

rr=[]
for f in input_files.split("\n")[:5] :
    try :
        n=parsefile(f)
        if len(n)>0 :
            rr.extend(n)
            print "%s - %s" % (len(n),f)
        else :
            print "!! - %s" % (f,)
    except Exception, e:
        print "!!! - %s - %s" % (f, e)
        raise
        
import simplejson, string

rr_germany = filter(lambda a: string.lower("%(category)s %(share)s" % a).find("germany") > -1, rr)
simplejson.dump(rr, open(os.path.join(output_dir, "resultate-runde2.json"), "w"))
simplejson.dump(rr_germany, open(os.path.join(output_dir, "resultate-runde2-germany.json"), "w"))

from unicodecsv import UnicodeWriter

uw = UnicodeWriter(open(os.path.join(output_dir, "resultate-runde2.csv"), "wb"), delimiter=";")
uw.writerows(map(lambda a: [a["date"], a["category"], a.get("fund", ""), a["share"], a["number"], a["price"], a["file"]], rr))

uw = UnicodeWriter(open(os.path.join(output_dir, "resultate-runde2-germany.csv"), "wb"), delimiter=";")
uw.writerows(map(lambda a: [a["date"], a["category"], a.get("fund", ""), a["share"], a["number"], a["price"], a["file"]], rr_germany))
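
The rr_germany line is a case-insensitive substring match over %-formatted record fields; an equivalent list comprehension (assuming every record has "category" and "share" keys), for comparison:

# equivalent to the filter(...) above
rr_germany = [a for a in rr
              if "germany" in ("%(category)s %(share)s" % a).lower()]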

Example #8
            res.append(r)
        return res

rr=[]
for f in ffn.split("\n") :
    try :
        n=parsefile(f)
        if len(n)>0 :
            rr.extend(n)
            print "%s - %s" % (len(n),f)
        else :
            print "!! - %s" % (f,)
    except Exception, e:
        print "!!! - %s - %s" % (f, e)
        raise
        
import simplejson, string

rr_germany = filter(lambda a: string.lower("%(category)s %(share)s" % a).find("germany") > -1, rr)
simplejson.dump(rr, open("data/resultate-runde2.json", "w"))
simplejson.dump(rr_germany, open("resultate-runde2-germany.json", "w"))

from unicodecsv import UnicodeWriter

uw = UnicodeWriter(open("data/resultate-runde2.csv", "wb"), delimiter=";")
uw.writerows(map(lambda a: [a["date"], a["category"], a["share"], a["number"], a["price"], a["file"]], rr))

uw = UnicodeWriter(open("data/resultate-runde2-germany.csv", "wb"), delimiter=";")
uw.writerows(map(lambda a: [a["date"], a["category"], a["share"], a["number"], a["price"], a["file"]], rr_germany))

Example #9
        return res

rr=[]
for f in input_files:
	try :
		n=parsefile(f)
		if n : 
			print "%s - %s" % (len(n),f)
			rr.extend(n)
		else : 
			print "!! - %s " % (f,)
	except Exception, e:
		print "!!! - %s - %s" % (f, e)
		raise
	_me = os.path.splitext(os.path.split(f)[1])[0]

import simplejson, string
from collections import defaultdict

rr_germany = filter(lambda a: string.lower("%(category)s %(share)s %(fund)s" % defaultdict(lambda: "", a)).find("germany") > -1, rr)
simplejson.dump(rr, open(os.path.join(output_dir, "resultate-%s.json" % _me), "w"))
simplejson.dump(rr_germany, open(os.path.join(output_dir, "resultate-%s-germany.json" % _me), "w"))

from unicodecsv import UnicodeWriter

uw = UnicodeWriter(open(os.path.join(output_dir, "resultate-%s.csv" % _me), "wb"), delimiter=";")
uw.writerows(map(lambda a: [a["date"], a["category"], a.get("fund", ""), a["share"], a["number"], a["price"], a["file"]], rr))

uw = UnicodeWriter(open(os.path.join(output_dir, "resultate-%s-germany.csv" % _me), "wb"), delimiter=";")
uw.writerows(map(lambda a: [a["date"], a["category"], a.get("fund", ""), a["share"], a["number"], a["price"], a["file"]], rr_germany))
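
Unlike Examples #7 and #8, this version wraps each record in defaultdict(lambda: "", a) so that %-formatting tolerates a missing "fund" key. The trick in isolation:

from collections import defaultdict

# %-formatting resolves keys via __getitem__, so a defaultdict substitutes
# "" for any key the record lacks instead of raising KeyError
rec = {"category": "Aktienfonds", "share": "DE0001234567"}   # no "fund" key
print "%(category)s %(share)s %(fund)s" % defaultdict(lambda: "", rec)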

Example #10
def bucket_download(request, id):
    # TODO: access control
    message = ""
    bucket = get_bucket(request, id)

    if request.method == 'POST':
        try:
            zipfile_name = request.POST['zipfile_name']
        except KeyError:
            zipfile_name = bucket.default_zipfile_name()
            if zipfile_name == "":
                messages.error(request, 'No zip file name selected.')
                c = RequestContext(request, {'title': 'FeedDB Explorer'})
                return render_to_response('explorer/base.html', c)

        if not zipfile_name.endswith(".zip"):
            zipfile_name += ".zip"

        download_choice = request.POST['download_choice']
        channel_choice = request.POST['channel_choice']
        #meta_option= request.POST['meta_option']
        quotechar_char = '"'
        #delimiter= request.POST['delimiter']
        #if delimiter =="tab":
        #    delimiter_char = '\t'
        #elif delimiter =="comma":
        #    delimiter_char = ','
        #else:
        delimiter_char = ','

        #get selected fields
        field_selected = []
        for item in request.POST.items():
            if (item[1] == "on" and item[0].startswith("chk:")):
                field_selected.append(item[0])
                message += item[0] + "\n"
        if (download_choice == "0"
                or download_choice == "2") and len(field_selected) == 0:
            messages.error(request, 'No fields selected.')
            c = RequestContext(request, {'title': 'FeedDB Explorer'})
            return render_to_response('explorer/base.html', c)
        meta_selected = {}
        for field in field_selected:
            parts = field.split(":")
            if parts[1] not in meta_selected:
                meta_selected[parts[1]] = []
            parameter = parts[1] + ":" + parts[2]
            meta_selected[parts[1]].append([parts[2], request.POST[parameter]])

        #get selected channels
        channel_selected = []
        channel_headers = []
        for item in request.POST.items():
            if (item[1] == "on" and item[0].startswith("channel:")):
                channel_selected.append(item[0])
                message += item[0] + "\n"
        if (channel_choice == "1" and len(channel_selected) == 0):
            messages.error(request, 'No channels selected.')
            c = RequestContext(request, {'title': 'FeedDB Explorer'})
            return render_to_response('explorer/base.html', c)
        channel_download = []
        channel_selected.sort()
        trials_download = []
        for ch in channel_selected:
            parts = ch.split(":")
            channel_download.append([parts[1], parts[2]])
            channel_headers.append("Trial %s:Channel %s" %
                                   (parts[1], parts[2]))
            if parts[1] not in trials_download:
                trials_download.append(parts[1])
        filenames = {}

        # create a temporary folder to store files
        from time import time
        tempdir = settings.EXPLORER_TEMPORARY_FOLDER + "/" + str(
            time()).replace('.', '')

        try:
            os.makedirs(tempdir)
        except OSError, err:
            messages.error(
                request,
                'Failed to create folder for storing downloaded files.')
            c = RequestContext(request, {'title': 'FeedDB Explorer'})
            return render_to_response('explorer/base.html', c)

        #
        # create meta data if the user has chosen to do so
        #
        if (download_choice == "0" or download_choice == "2"):
            #create trials meta data file and put it into the temp zip file
            full_filename = "%s/trials.csv" % tempdir
            filenames["trials.csv"] = full_filename

            metaWriter = UnicodeWriter(open(full_filename, "w"),
                                       delimiter=delimiter_char,
                                       doublequote=False,
                                       escapechar='\\',
                                       quotechar=quotechar_char,
                                       quoting=csv.QUOTE_MINIMAL)

            #output trials
            #output headers
            headers = ["Trial:ID"]
            for key, value in meta_selected.items():
                if key not in ('Setup', 'EmgSetup', 'SonoSetup', 'Sensor',
                               'EmgSensor', 'SonoSensor', 'Channel',
                               'EmgChannel', 'SonoChannel', 'PressureChannel',
                               'ForceChannel', 'StrainChannel',
                               'KinematicsChannel', 'EventChannel'):
                    for v in value:
                        headers.append(v[1])
            metaWriter.writerow(headers)

            objects = {}
            for trial in bucket.trials.all():
                values = [trial.id]
                objects["Session"] = trial.session
                objects["Experiment"] = trial.session.experiment
                objects["Study"] = trial.session.experiment.study
                objects["Subject"] = trial.session.experiment.subject
                objects["Trial"] = trial
                for key, value in meta_selected.items():
                    if key in objects:
                        for v in value:
                            s = getattr(objects[key], v[0])
                            if hasattr(s, 'split'):
                                ss = s.split('\r\n')
                                if len(ss) > 1:
                                    s = ' '.join(ss)

                            values.append(s)

                metaWriter.writerow(values)

            #output channels
            #generate channel headers
            headers = ["Channel:ID"]
            for key, value in meta_selected.items():
                #generate headers meta data
                if key in ('Setup', 'EmgSetup', 'SonoSetup', 'Sensor',
                           'EmgSensor', 'SonoSensor', 'Channel', 'EmgChannel',
                           'SonoChannel', 'PressureChannel', 'ForceChannel',
                           'StrainChannel', 'KinematicsChannel',
                           'EventChannel'):
                    for v in value:
                        headers.append(v[1])

            for key, value in meta_selected.items():
                #generate headers for the second sensor's meta data (specifically crystal2 in sono channels)
                if key in ('Sensor', 'SonoSensor'):
                    for v in value:
                        headers.append('Sensor 2:%s' % v[1])

            channel_types = [
                'strainchannel', 'forcechannel', 'pressurechannel',
                'kinematicschannel'
            ]
            for trial in bucket.trials.all():
                #trial_name = trial.title.replace('.', '').replace(',', '').replace(' ', '_').strip().lower()
                #filename = "trial_%d_%s_channels.csv" % (trial.id, trial_name)
                #full_filename = "%s/trial_%d_%s_channels.csv" % (tempdir, trial.id,trial_name)
                filename = "trial_%d_channels.csv" % trial.id
                full_filename = "%s/trial_%d_channels.csv" % (tempdir,
                                                              trial.id)
                filenames[filename] = full_filename

                f = open(full_filename, "w")
                metaWriter = UnicodeWriter(f,
                                           delimiter=delimiter_char,
                                           doublequote=False,
                                           escapechar='\\',
                                           quotechar=quotechar_char,
                                           quoting=csv.QUOTE_MINIMAL)
                metaWriter.writerow(headers)
                objects = {}
                for lineup in trial.session.channellineup_set.all():
                    objects = {}
                    ch = lineup.channel

                    if ch is None:
                        values = ["deadchannel"]
                    else:
                        objects["Channel"] = lineup.channel
                        values = [ch.id]
                        objects["Setup"] = ch.setup
                        for channel_type in channel_types:
                            if hasattr(ch, channel_type):
                                objects["Sensor"] = getattr(
                                    ch, channel_type).sensor
                        if hasattr(ch.setup, 'emgsetup'):
                            objects["EmgSetup"] = ch.setup.emgsetup
                        if hasattr(ch.setup, 'sonosetup'):
                            objects["SonoSetup"] = ch.setup.sonosetup
                        if hasattr(ch, 'emgchannel'):
                            objects["EmgChannel"] = ch.emgchannel
                            objects["Sensor"] = ch.emgchannel.sensor
                            objects["EmgSensor"] = ch.emgchannel.sensor
                        if hasattr(ch, 'eventchannel'):
                            objects["EventChannel"] = ch.eventchannel
                        if hasattr(ch, 'pressurechannel'):
                            objects["PressureChannel"] = ch.pressurechannel
                        if hasattr(ch, 'strainchannel'):
                            objects["StrainChannel"] = ch.strainchannel
                        if hasattr(ch, 'forcechannel'):
                            objects["ForceChannel"] = ch.forcechannel
                        if hasattr(ch, 'kinematicschannel'):
                            objects["KinematicsChannel"] = ch.kinematicschannel
                        if hasattr(ch, 'sonochannel'):
                            objects["SonoChannel"] = ch.sonochannel
                            objects["Sensor"] = ch.sonochannel.crystal1
                            objects["SonoSensor"] = ch.sonochannel.crystal1
                        if hasattr(ch, 'emgchannel'):
                            objects["Sensor"] = ch.emgchannel.sensor

                    for key, value in meta_selected.items():
                        if key in ('Setup', 'EmgSetup', 'SonoSetup', 'Sensor',
                                   'EmgSensor', 'SonoSensor', 'Channel',
                                   'EmgChannel', 'SonoChannel',
                                   'PressureChannel', 'ForceChannel',
                                   'StrainChannel', 'KinematicsChannel',
                                   'EventChannel'):
                            for v in value:
                                s = ''
                                if key in objects and objects[key] is not None:
                                    s = getattr(objects[key], v[0])
                                    if hasattr(
                                            s,
                                            'split'):  #check if s is a string
                                        ss = s.split('\r\n')
                                        if len(ss) > 1:
                                            s = ' '.join(ss)
                                values.append(s)

                    #output the second crystal sensor information if it is sono channel
                    if hasattr(ch, 'sonochannel'):
                        objects["Sensor"] = ch.sonochannel.crystal2
                        objects["SonoSensor"] = ch.sonochannel.crystal2
                        for key, value in meta_selected.items():
                            if key in ('Sensor', 'SonoSensor'):
                                for v in value:
                                    s = ''
                                    if key in objects:
                                        s = getattr(objects[key], v[0])
                                        if hasattr(s, 'split'):
                                            ss = s.split('\r\n')
                                            if len(ss) > 1:
                                                s = ' '.join(ss)
                                    values.append(s)
                    metaWriter.writerow(values)

                f.close()
        #
        # put data files into the tmp zip
        #
        data_files = {}
        if (download_choice == "1" or download_choice == "2"):
            # download all trial files
            if channel_choice == "0":
                for trial in bucket.trials.all():
                    #check if there is a data file
                    if (trial.data_file is not None and trial.data_file != ""):
                        filename = "trial_%d.dat" % trial.id
                        full_filename = "%s/%s" % (settings.MEDIA_ROOT,
                                                   trial.data_file)
                        data_files[filename] = full_filename
            else:
                # download selected channels
                filename = "channels.dat"
                full_filename = "%s/channels.dat" % tempdir
                filenames[filename] = full_filename
                f = open(full_filename, "w")
                metaWriter = UnicodeWriter(f,
                                           delimiter=delimiter_char,
                                           doublequote=False,
                                           escapechar='\\',
                                           quotechar=quotechar_char,
                                           quoting=csv.QUOTE_MINIMAL)
                metaWriter.writerow(channel_headers)
                trial_readers = {}
                total_trial_number = 0
                for trial in bucket.trials.all():
                    #check if there is a data file
                    if (trial.data_file is not None and trial.data_file != ""
                            and str(trial.id) in trials_download):
                        full_filename = "%s/%s" % (settings.MEDIA_ROOT,
                                                   trial.data_file)
                        csvfile = open(full_filename, "rU")
                        dialect = csv.Sniffer().sniff(csvfile.read(1024))
                        csvfile.seek(0)
                        reader = csv.reader(csvfile, dialect)
                        trial_readers[str(trial.id)] = {
                            "reader": reader,
                            "hasmore": True,
                            "file": csvfile
                        }
                        total_trial_number += 1

                rows = {}
                newrow = []
                finished_file_number = 0

                while finished_file_number < total_trial_number:
                    rows.clear()
                    for key in trial_readers:
                        try:
                            if trial_readers[key]["hasmore"]:
                                row = trial_readers[key]["reader"].next()
                                rows[key] = row
                        except StopIteration:
                            finished_file_number += 1
                            trial_readers[key]["hasmore"] = False
                            trial_readers[key]["file"].close()

                    newrow = []
                    for ch in channel_download:
                        if ch[0] in rows:
                            if int(ch[1]) > len(rows[ch[0]]):
                                messages.error(
                                    request,
                                    "Error in channel lineup positions for trial: %s"
                                    % ch[0])
                                c = RequestContext(
                                    request, {'title': 'FeedDB Explorer'})
                                return render_to_response(
                                    'explorer/base.html', c)
                            newrow.append(rows[ch[0]][int(ch[1]) - 1])
                        else:
                            newrow.append('')
                    metaWriter.writerow(newrow)
                f.close()
        response = send_zipfile(request, filenames, data_files, zipfile_name)
        for fname, full_file in filenames.items():  # avoid shadowing file()
            os.remove(full_file)
        os.rmdir(tempdir)
        return response
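
The channel-download branch above walks several trial CSVs in lockstep, emitting one merged row per iteration and padding with empty cells once a file is exhausted. The core of that pattern, sketched for two files with izip_longest (the file names are illustrative):

import csv
from itertools import izip_longest   # Python 2

fa, fb = open('a.csv', 'rU'), open('b.csv', 'rU')
# the shorter file contributes a single empty cell per missing row
for row_a, row_b in izip_longest(csv.reader(fa), csv.reader(fb), fillvalue=['']):
    print row_a + row_b
fa.close()
fb.close()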
Example #11
                            title_words = set([stemmer.stemWord(word.lower())
                                               for word in title.split()
                                               if word not in stopwords
                                               ])
                            if name_words == title_words:
                                wiki_match, confidence = wikipedia_match(ann)

            except Exception as e:
                import pdb
                pdb.set_trace()

            if wiki_match:
                outfile_name = new_annotations
                if close_match:
                    close_match_obj = close_match.split('/')[-1]
                    wiki_match_obj = wiki_match.split('/')[-1]

                    if close_match_obj != wiki_match_obj:
                        outfile_name = different_annotations
                    else:
                        print name, 'already matched with:', close_match
                        continue

                with open(outfile_name, 'a+') as outfile:
                    writer = UnicodeWriter(outfile)
                    writer.writerow([subject_url,
                                     name,
                                     wiki_match,
                                     unicode(confidence)
                                     ])
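
The name/title comparison at the top of this snippet reduces both strings to stemmed, stopword-free word sets before testing equality. A standalone sketch of that normalization, assuming the PyStemmer package (the stopword list here is illustrative):

import Stemmer   # PyStemmer

stemmer = Stemmer.Stemmer('english')
stopwords = set(['the', 'of', 'and'])

def word_set(text):
    # lowercase, drop stopwords, stem, collapse to a set
    words = [w.lower() for w in text.split()]
    return set(stemmer.stemWord(w) for w in words if w not in stopwords)

print word_set(u'The Parks of Berlin') == word_set(u'Berlin parks')   # True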