Example #1
	def test_doubles(self):
		d_input = [[20.1,25.2], [25.0,30.3], [45.1,55.5]]
		d_expected = [[20.1,25.0,45.1],[25.2,30.3,55.5]]
		d = Integration()
		d.set_input(d_input)
		d.integrate(0,[])
		d_result = d.get_solutions()
		d.set_orders(d_result)
		self.assertEqual(d_expected,d.get_orders(),"Result doesn't match")
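The expected value in this test is simply the input rows regrouped into columns. A minimal, self-contained sketch of that relationship (independent of the Integration API, shown only to make the fixture easier to read):

d_input = [[20.1, 25.2], [25.0, 30.3], [45.1, 55.5]]
# transpose the rows into columns
d_columns = [list(col) for col in zip(*d_input)]
assert d_columns == [[20.1, 25.0, 45.1], [25.2, 30.3, 55.5]]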
Example #2
    def create(self):

        self.makePPMScale()

        self.newPlot()

        self.dataSource = ColumnDataSource(
            data=dict(ppm=self.ppmScale, data=self.pdata))

        self.reference = Reference(self.logger, self.dataSource)
        self.reference.create()
        self.reference.draw(self.plot)

        self.peakPicking = PeakPicking(self.logger, self.id, self.dic,
                                       self.udic, self.pdata, self.dataSource,
                                       self.reference)
        self.peakPicking.create()
        self.peakPicking.draw(self.plot)

        self.integration = Integration(self.logger, self.id, self.pdata,
                                       self.dataSource, self.reference)
        self.integration.create()
        self.integration.draw(self.plot)

        self.multipletAnalysis = MultipletAnalysis(self.logger, self.id,
                                                   self.dic, self.udic,
                                                   self.pdata, self.dataSource,
                                                   self.peakPicking,
                                                   self.integration,
                                                   self.reference)
        self.multipletAnalysis.create()
        self.multipletAnalysis.draw(self.plot)

        self.createMeasureJTool()

        self.plot.line('ppm', 'data', source=self.dataSource, line_width=2)
Example #3
    def __init__(self, corpus, tfidf, lsi, lsi_index, tfidf_dict, tfidf_web, db_mean_value,
                 ball_tree, id_to_web, d2v_model, db_des, w2v_model, db_key, len_dict):
        self.corpus = corpus                    # bow corpus
        self.tfidf = tfidf                      # tfidf model
        self.lsi = lsi                          # lsi model
        self.lsi_index = lsi_index              # index for lsi similarity computation
        self.tfidf_dict = tfidf_dict            # dictionary for word <-> id
        self.tfidf_web = tfidf_web              # dictionary for doc n <-> website
        self.db_mean_value = db_mean_value      # mean vector <-> website database
        self.ball_tree = ball_tree              # nearest neighbors ball tree structure
        self.id_to_web = id_to_web              # dictionary to translate nearest neighbor ids to websites
        self.d2v_model = d2v_model              # description doc2vec model
        self.db_des = db_des                    # website <-> description db connection
        self.w2v_model = w2v_model              # keywords word2vec model
        self.db_key = db_key                    # website <-> keywords db connection
        self.len_dict = len_dict                # website <-> metadata (len) dict
        self.loss = 1.0
        self.key_len_in = 0.0                           # metadata about input website
        self.des_len_in = 0.0                           # they are changed when asking for input web metadata
        self.txt_len_in = 0.0                           # in self.inp_web_info with explicit = True

        # to have the integration functions
        self.integrate = Integration(self.corpus, self.tfidf, self.lsi, self.lsi_index, self.tfidf_web,
                                     self.db_mean_value, self.ball_tree, self.id_to_web, self.d2v_model)
Example #4
class CreateJson(object):
    def __init__(self, corpus, tfidf, lsi, lsi_index, tfidf_dict, tfidf_web, db_mean_value,
                 ball_tree, id_to_web, d2v_model, db_des, w2v_model, db_key, len_dict):
        self.corpus = corpus                    # bow corpus
        self.tfidf = tfidf                      # tfidf model
        self.lsi = lsi                          # lsi model
        self.lsi_index = lsi_index              # index for lsi similarity computation
        self.tfidf_dict = tfidf_dict            # dictionary for word <-> id
        self.tfidf_web = tfidf_web              # dictionary for doc n <-> website
        self.db_mean_value = db_mean_value      # mean vector <-> website database
        self.ball_tree = ball_tree              # nearest neighbors ball tree structure
        self.id_to_web = id_to_web              # dictionary to translate nearest neighbor ids to websites
        self.d2v_model = d2v_model              # description doc2vec model
        self.db_des = db_des                    # website <-> description db connection
        self.w2v_model = w2v_model              # keywords word2vec model
        self.db_key = db_key                    # website <-> keywords db connection
        self.len_dict = len_dict                # website <-> metadata (len) dict
        self.loss = 1.0
        self.key_len_in = 0.0                           # metadata about input website
        self.des_len_in = 0.0                           # they are changed when asking for input web metadata
        self.txt_len_in = 0.0                           # in self.inp_web_info with explicit = True

        # to have the integration functions
        self.integrate = Integration(self.corpus, self.tfidf, self.lsi, self.lsi_index, self.tfidf_web,
                                     self.db_mean_value, self.ball_tree, self.id_to_web, self.d2v_model)

    def count_text(self, url):
        """function to count text tokens present in tfidf model"""
        # less meaningful now that lsi is used, but it still returns the number of tokens in the tf-idf representation
        try:
            indx = list(self.tfidf_web.values()).index(url)     # try to get the index of the website
        except ValueError:
            return 0

        # the following procedure to get tf-idf representation is described in gensim tutorial
        doc_num = list(self.tfidf_web.keys())[indx]         # now get its id (same index)

        bow = self.corpus[doc_num]                          # transform it in bag of words
        tf_rap = self.tfidf[bow]                            # get its tfidf representation

        # since it is a sparse vector representation, just need to return the length (length = number of words present)
        return len(tf_rap)           # tfidf representation format is [(index_1, value_1),...,(index_n, value_n)]

    def get_weight(self, url):
        # count the keywords/description token/text tokens. We use len_dict[url] = (#keywords, #description tokens)
        try:
            key_len = self.len_dict[url][0]     # retrieve #keywords.
        except KeyError:
            key_len = 0

        try:
            des_len = self.len_dict[url][1]     # retrieve #description tokens
        except KeyError:
            des_len = 0

        text_tokens = self.count_text(url)      # get count of text tokens

        return key_len, des_len, text_tokens

    def inp_web_info(self, url, explicit=False):
        """information on the input website"""
        # if explicit = True, keywords and description tokens are explicitly written. Use it just for input data!

        if explicit:                        # enriched metadata, used only for input website

            try:
                keywords = self.db_key[str(url)]        # try to retrieve keywords from the keywords shelve db
            except KeyError:
                keywords = []

            try:
                description = self.db_des[str(url)]     # try to retrieve description from the description shelve db
            except KeyError:
                description = []

            self.key_len_in = len(keywords)             # count keywords
            self.des_len_in = len(description)          # count description tokens
            self.txt_len_in = self.count_text(url)      # count text tokens

            if self.key_len_in == 0 and self.des_len_in == 0 and self.txt_len_in == 0:
                # if there are neither keywords nor description nor text (e.g. a non-existent website)
                return {}

            # create the dictionary with metadata information about the website
            input_dict = {'metadata': {'keywords': keywords, 'description': description,
                                       'keywords_number': self.key_len_in, 'desc_tokens': self.des_len_in,
                                       'text_tokens': self.txt_len_in},
                          'link': 'http://' + url}      # add the url. It is assumed that http redirects to https

        else:
            key_len, des_len, text_tokens = self.get_weight(url)        # get information

            # create the dictionary with restricted metadata information about the website
            input_dict = {'metadata': {'keywords_number': key_len, 'desc_tokens': des_len,
                                       'text_tokens': text_tokens},
                          'link': 'http://' + url}      # add the url. It is assumed that http redirects to https

        return input_dict

    def text_websites(self, weblist, sf, n, only_web=False):
        """compute the n websites most similar according to tfidf, and compute their value also in the other models"""

        # get the n most similar websites according to tf-idf
        tfidf_score, tfidf_rank = self.integrate.ms_tfidf(weblist, n)

        text_dict = dict()              # empty dict for json obj creation

        w2v_mean, num = mean_w2v(self.db_mean_value, weblist)  # mean vectors of the websites in input according to w2v
        d2v_mean, num = mean_d2v(self.d2v_model, weblist)      # same for d2v.
        # Can we avoid computing it in other functions as well?

        if not only_web:         # if we want the entire dictionary with metadata and partial score (only_website=false)

            for i in range(0, len(tfidf_rank)):         # for every similar website retrieved through tf-idf model

                item = tfidf_rank[i]                    # get its name (url without http://)

                w2v_s = w2v_distance(self.db_mean_value, w2v_mean, item, self.loss)    # distance according to w2v model
                d2v_s = d2v_distance(self.d2v_model, d2v_mean, item, self.loss)      # distance according to d2v model

                metadata = self.inp_web_info(item)      # get its metadata
                text_dict[item] = metadata              # json obj I part: metadata

                scores = {'w2v': w2v_s,                 # json obj II part: scores according to the three models
                          'd2v': d2v_s,
                          'tfidf': tfidf_score[i]}

                text_dict[item]['scores'] = scores

                if sf.meta_len:     # if the metadata are used to compute the website total_score
                    w2v_d = metadata['metadata']['keywords_number']  # retrieve single metadata in order to use them for
                    d2v_d = metadata['metadata']['desc_tokens']         # the score function
                    tfidf_d = metadata['metadata']['text_tokens']
                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_s, tfidf_score=tfidf_score[i],  # compute
                                                key_len_out=w2v_d, des_len_out=d2v_d, txt_len_out=tfidf_d)  # totalscore
                else:
                    # otherwise compute total_score without retrieving metadata
                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_s, tfidf_score=tfidf_score[i])

                text_dict[item]['total_score'] = total_score  # add total_score to the dict of output websites info

        else:                # if we want just a dictionary item: total score + link, without visible metadata
            for i in range(0, len(tfidf_rank)):         # for every website suggested by tf-idf model
                item = tfidf_rank[i]                    # get its name

                w2v_s = w2v_distance(self.db_mean_value, w2v_mean, item, self.loss)    # distance according to w2v model
                d2v_s = d2v_distance(self.d2v_model, d2v_mean, item, self.loss)      # distance according to d2v model

                text_dict[item] = {}    # initialize as an empty dict so new keys can be added

                if sf.meta_len:             # are metadata needed to compute total score?
                    w2v_d, d2v_d, tfidf_d = self.get_weight(item)
                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_s, tfidf_score=tfidf_score[i],
                                                key_len_out=w2v_d, des_len_out=d2v_d, txt_len_out=tfidf_d)
                else:
                    # if not, do not retrieve metadata information
                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_s, tfidf_score=tfidf_score[i])

                text_dict[item]['total_score'] = total_score        # add total score
                text_dict[item]['link'] = 'http://' + item    # since we are not using self.inp_web_info (for meta&link)

        return text_dict

    def d2v_websites(self, weblist, sf, n, only_web=False):
        """compute the n websites most similar according to d2v, and compute their value also in the other models"""
        # get the n most similar websites according to d2v
        d2v_score, d2v_rank = self.integrate.ms_d2v(weblist, n)     # retrieve n similar websites suggested by d2v
        d2v_dict = dict()           # empty dict for json obj creation

        w2v_mean, num = mean_w2v(self.db_mean_value, weblist)       # mean value of input websites according to w2v
        tfidf_mean, num = mean_tfidf(self.tfidf_web, self.corpus, self.tfidf, self.lsi, weblist)  # mean value according to tf-idf

        if not only_web:        # if in the query only_website=false

            for i in range(0, len(d2v_rank)):               # for every similar website suggested by d2v

                item = d2v_rank[i]                  # get its name (url without http://)

                w2v_s = w2v_distance(self.db_mean_value, w2v_mean, item, self.loss)   # distance according to w2v model
                # and according to tfidf :
                tfidf_s = tfidf_distance(self.corpus, self.tfidf, self.tfidf_web, tfidf_mean, item, self.loss)

                metadata = self.inp_web_info(item)    # retrieve its metadata and link
                d2v_dict[item] = metadata             # json obj I part: metadata attached

                scores = {'w2v': w2v_s,               # json obj II part: scores according to the three models
                          'd2v': d2v_score[i],
                          'tfidf': tfidf_s}

                d2v_dict[item]['scores'] = scores       # attach scores

                if sf.meta_len:                 # if metadata are needed to compute total_score
                    w2v_d = metadata['metadata']['keywords_number']  # retrieve single metadata in order to use them for
                    d2v_d = metadata['metadata']['desc_tokens']         # the score function
                    tfidf_d = metadata['metadata']['text_tokens']

                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_score[i], tfidf_score=tfidf_s,
                                                key_len_out=w2v_d, des_len_out=d2v_d, txt_len_out=tfidf_d)

                else:       # if they are not needed, don't even retrieve them
                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_score[i], tfidf_score=tfidf_s)

                d2v_dict[item].update({'total_score': total_score})     # attach total score

        else:           # if only_website=true, so we don't show metadata in the output websites
            for i in range(0, len(d2v_rank)):       # for all the websites suggested by d2v
                item = d2v_rank[i]
                d2v_dict[item] = {}     # create the json object with web info

                w2v_s = w2v_distance(self.db_mean_value, w2v_mean, item, self.loss)   # distance according to w2v model
                # and according to tfidf
                tfidf_s = tfidf_distance(self.corpus, self.tfidf, self.tfidf_web, tfidf_mean, item, self.loss)

                # compute the total score
                if sf.meta_len:         # if metadata are needed to compute the total score
                    w2v_d, d2v_d, tfidf_d = self.get_weight(item)
                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_score[i], tfidf_score=tfidf_s,
                                                key_len_out=w2v_d, des_len_out=d2v_d, txt_len_out=tfidf_d)
                else:       # otherwise don't retrieve them
                    total_score = sf.score_func(w2v_score=w2v_s, d2v_score=d2v_score[i], tfidf_score=tfidf_s)

                d2v_dict[item]['total_score'] = total_score     # attach total score and link, since we are not
                d2v_dict[item]['link'] = 'http://' + item       # using self.inp_web_info for meta & link

        return d2v_dict

    def w2v_websites(self, weblist, sf, n, only_web=False):
        """compute the n websites most similar according to w2v, and compute their value also in the other models"""
        # get the n most similar websites according to w2v
        w2v_score, w2v_rank = self.integrate.ms_w2v_key(weblist, n)
        w2v_dict = dict()             # empty dict for json obj creation

        # weblist is the input list of websites
        d2v_mean, num = mean_d2v(self.d2v_model, weblist)      # input website vector rep mean value according to d2v
        tfidf_mean, num = mean_tfidf(self.tfidf_web, self.corpus, self.tfidf, self.lsi, weblist)      # same for tf-idf

        if not only_web:            # if only_website=false (so we want to see metadata of websites in the output)
            for i in range(0, len(w2v_rank)):               # for every similar website suggested by w2v

                item = w2v_rank[i]                          # get its name (url without http://)

                # compute the distance according to d2v model
                d2v_s = d2v_distance(self.d2v_model, d2v_mean, item, self.loss)
                # and according to tfidf
                tfidf_s = tfidf_distance(self.corpus, self.tfidf, self.tfidf_web, tfidf_mean, item, self.loss)

                metadata = self.inp_web_info(item)
                w2v_dict[item] = metadata                   # json obj I part: metadata

                scores = {'w2v': w2v_score[i],              # json obj II part: scores according to the three models
                          'd2v': d2v_s,
                          'tfidf': tfidf_s}

                w2v_dict[item]['scores'] = scores           # append scores

                if sf.meta_len:             # if metadata are needed to compute the total score
                    w2v_d = metadata['metadata']['keywords_number']  # retrieve single metadata in order to use them for
                    d2v_d = metadata['metadata']['desc_tokens']         # the score function
                    tfidf_d = metadata['metadata']['text_tokens']
                    total_score = sf.score_func(w2v_score=w2v_score[i], d2v_score=d2v_s, tfidf_score=tfidf_s,
                                                key_len_out=w2v_d, des_len_out=d2v_d, txt_len_out=tfidf_d)

                else:   # otherwise don't even retrieve them
                    total_score = sf.score_func(w2v_score=w2v_score[i], d2v_score=d2v_s, tfidf_score=tfidf_s)

                w2v_dict[item]['total_score'] = total_score     # append total score

        else:       # if only_website=true, we don't want to see metadata in the output

            for i in range(0, len(w2v_rank)):       # for every website suggested by w2v
                item = w2v_rank[i]                  # retrieve its name (url without http://)
                w2v_dict[item] = {}         # initialize as an empty dict so new keys can be added

                # compute the distance according to d2v model
                d2v_s = d2v_distance(self.d2v_model, d2v_mean, item, self.loss)

                # and according to tfidf
                tfidf_s = tfidf_distance(self.corpus, self.tfidf, self.tfidf_web, tfidf_mean, item, self.loss)

                if sf.meta_len:         # if metadata are needed  for computing the total score
                    w2v_d, d2v_d, tfidf_d = self.get_weight(item)

                    # compute the total score
                    total_score = sf.score_func(w2v_score=w2v_score[i], d2v_score=d2v_s, tfidf_score=tfidf_s,
                                                key_len_out=w2v_d, des_len_out=d2v_d, txt_len_out=tfidf_d)
                else:       # otherwise skip metadata retrieval
                    total_score = sf.score_func(w2v_score=w2v_score[i], d2v_score=d2v_s, tfidf_score=tfidf_s)

                w2v_dict[item]['total_score'] = total_score  # attach total score and link; link added since
                w2v_dict[item]['link'] = 'http://' + item   # we are not using self.inp_web_info

        return w2v_dict

    def get_json(self, weblist, sf, n, only_web=False):                 # weblist must be a list
        """generate the json object with the wanted information"""

        # note: inp_web_info with explicit=True updates class attributes (key_len_in, des_len_in, txt_len_in) used elsewhere

        d2v_web = self.d2v_websites(weblist, sf, n, only_web)       # construct dictionary doc2vec similar websites
        txt_web = self.text_websites(weblist, sf, n, only_web)      # construct dictionary with tf-idf similar websites
        w2v_web = self.w2v_websites(weblist, sf, n, only_web)       # construct dictionary with word2v similar websites

        d2v_web.update(w2v_web)                     # update first dictionary with the second and the third one
        d2v_web.update(txt_web)                     # to avoid repetitions.

        # now a json obj is created: metadata of the input website, with the output given by the three models
        if d2v_web:     # if the dictionary is not empty
            input_metadata = dict()

            for website in weblist:                                 # input_website_metadata part
                inp_web = self.inp_web_info(website, explicit=True)
                if inp_web:     # if it exists in the models
                    input_metadata[website] = inp_web
                else:
                    input_metadata[website] = 'website not present in the models'

            # it has to be ordered according to the total score (it is done in company_sim.py)
            json_obj = {'input_website_metadata': input_metadata, 'output': d2v_web}
        else:
            json_obj = {}

        return json_obj
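For orientation, a rough sketch of the object get_json builds when only_web=False, using hypothetical URLs and scores (keys are taken from the code above; this is not actual output):

example_json_obj = {
    'input_website_metadata': {
        'example.com': {
            'metadata': {'keywords': ['alpha', 'beta'], 'description': ['gamma'],
                         'keywords_number': 2, 'desc_tokens': 1, 'text_tokens': 250},
            'link': 'http://example.com'
        }
    },
    'output': {
        'similar-site.com': {
            'metadata': {'keywords_number': 5, 'desc_tokens': 9, 'text_tokens': 180},
            'link': 'http://similar-site.com',
            'scores': {'w2v': 0.81, 'd2v': 0.77, 'tfidf': 0.69},
            'total_score': 0.76
        }
    }
}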
Example #5
#Tiffany Wang 260684152
#Numerical Methods ECSE 543 - Assignment 3

import math
from curvefitting import CurveFitting
from nonlinear import NonLinear
from circuit import Circuit
from integration import Integration

cv = CurveFitting()
nl = NonLinear()
c = Circuit()
i = Integration()

#======================================Question 1======================================

# # a)
# B_1 = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
# H_1 = [0.0, 14.7, 36.5, 71.7, 121.4, 197.4]

# #get H value at every 0.01
# x = [0.01 * i for i in range(101)]
# result = []
# for i in range(len(x)):
# 	print("B: ", x[i], "   H: ", round(cv.lagrange(x[i], B_1, H_1), 5))
# 	result.append(cv.lagrange(x[i], B_1, H_1))

# file = open("magnetic1.csv", "w")
# for i in range(len(x)):
# 	string = str(x[i]) + ", " + str(result[i]) + "\n"
# 	file.write(string)
Example #6
from integration import Integration
import sys

inFile = sys.argv[1]
f = open(inFile,'r')
lines = f.read().splitlines()

f.close()

fretes = [float(x) for x in lines[0].split(',')]    # freight values ("fretes")
itens = [float(x) for x in lines[1].split(',')]     # item values ("itens")
totais = [float(x) for x in lines[2].split(',')]    # totals ("totais")

data = [fretes, itens, totais]
i = Integration()
i.set_input(data)
i.integrate(0,[])
i.set_orders(i.get_solutions())
i.pretty_print()
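Given how the lines are parsed above, a hypothetical input file would hold three comma-separated rows (fretes, itens, totais), for example mirroring the fixture from Example #1:

20.1,25.2
25.0,30.3
45.1,55.5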
Example #7
import json
import traceback

# 'validate' (used below) comes from the jsonschema package; IntegrationLog and
# Integration are assumed to be importable from the project's integration module
# (their import is not shown in the original snippet).
from jsonschema import validate

with open('settings.json', "rb") as PFile:
    settings_data = json.loads(PFile.read().decode('utf-8'))

with open('settings_schema.json', "rb") as PFile:
    data_schema = json.loads(PFile.read().decode('utf-8'))

try:
    validate(instance=settings_data, schema=data_schema)
except Exception as e:
    raise Exception("Incorrect value in the settings file\n{}".format(str(e)))

ov_url = settings_data['ovUrl']
ov_access_key = settings_data['ovAccessKey']
ov_secret_key = settings_data['ovSecretKey']
ov_integration_name = settings_data['ovIntegrationName']

field_n = settings_data['dataN']

with open('ihub_parameters.json', "rb") as PFile:
    ihub_data = json.loads(PFile.read().decode('utf-8'))

process_id = ihub_data['processId']

integration_log = IntegrationLog(process_id, ov_url, ov_access_key, ov_secret_key, ov_integration_name, ov_token=True)
integration = Integration(integration_log)

integration.start()
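A hypothetical settings.json covering the keys this snippet reads (values are placeholders; the real constraints live in settings_schema.json):

{
    "ovUrl": "https://example.com",
    "ovAccessKey": "<access key>",
    "ovSecretKey": "<secret key>",
    "ovIntegrationName": "ExampleIntegration",
    "dataN": 10
}

Likewise, ihub_parameters.json is expected to provide at least a "processId" field.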
Example #8
class Plot:

    WIDTH = 800
    HEIGHT = 600

    def __init__(self, logger, id, path, compound):
        self.logger = logger

        self.logger.info("Parsing experiment data")
        self.dic, _ = ng.bruker.read(path)
        _, self.pdata = ng.bruker.read_pdata("{}/pdata/1/".format(path))
        self.logger.info("Experiment data parsed successfully")

        self.compound = compound
        self.id = SpectrumDB.Create(
            id
        )  # SpectrumDB.Create(hashlib.sha256(self.pdata.tostring()).hexdigest())

    def createReferenceLayout(self):
        return column(
            row(column(self.reference.old), column(self.reference.new)),
            row(self.reference.button))

    def createPeakPickingLayout(self):
        return column(
            CustomRow(column(self.peakPicking.manual),
                      column(self.peakPicking.peak),
                      hide=True), row(self.peakPicking.dataTable),
            row(column(self.peakPicking.deselectButton),
                column(self.peakPicking.deleteButton)),
            row(self.peakPicking.chemicalShiftReportTitle),
            row(self.peakPicking.chemicalShiftReport))

    def createIntegrationLayout(self):
        return column(
            CustomRow(column(self.integration.manual), hide=True),
            row(self.integration.dataTable),
            row(column(self.integration.deselectButton),
                column(self.integration.deleteButton)))

    def createMultipletManagerLayout(self):
        return column(
            CustomRow(column(self.multipletAnalysis.manual), hide=True),
            row(self.multipletAnalysis.dataTable),
            row(self.multipletAnalysis.title),
            row(column(self.multipletAnalysis.classes),
                column(self.multipletAnalysis.integral),
                column(self.multipletAnalysis.j)),
            row(self.multipletAnalysis.delete),
            row(self.multipletAnalysis.reportTitle),
            row(self.multipletAnalysis.report))

    def createTabs(self, tabs):
        callback = CustomJS(args=dict(
            referenceTool=self.reference.tool,
            peakPickingManualTool=self.peakPicking.manualTool,
            peakByPeakTool=self.peakPicking.peakTool,
            integrationTool=self.integration.tool,
            multipletAnalysisTool=self.multipletAnalysis.tool),
                            code="""
            switch(this.active) {
            case 0:
                referenceTool.active = true;
                break;
            case 1:
                if (!peakByPeakTool.active) {
                    peakPickingManualTool.active = true;
                }
                break;
            case 2:
                integrationTool.active = true;
                break;
            case 3:
                multipletAnalysisTool.active = true;
                break;
            }
        """)
        return Tabs(tabs=tabs, width=500, callback=callback, id="tabs")

    def draw(self):
        try:

            referenceLayout = self.createReferenceLayout()
            peakPickingLayout = self.createPeakPickingLayout()
            integrationLayout = self.createIntegrationLayout()
            multipletManagerLayout = self.createMultipletManagerLayout()

            referenceTab = Panel(child=referenceLayout, title="Reference")
            peakPickingTab = Panel(child=peakPickingLayout,
                                   title="Peak Picking")
            integrationTab = Panel(child=integrationLayout,
                                   title="Integration")
            multipletAnalysisTab = Panel(child=multipletManagerLayout,
                                         title="Multiplet Analysis")

            tabs = self.createTabs([
                referenceTab, peakPickingTab, integrationTab,
                multipletAnalysisTab
            ])

            curdoc().add_root(
                row(
                    column(
                        row(self.plot),
                        row(Div(text=self.compound, id="compoundContainer"))),
                    column(row(tabs))))
            curdoc().title = "NMR Analysis Tool - " + str(self.id)
        except NameError:
            print("Please create plot first")

    def create(self):

        self.makePPMScale()

        self.newPlot()

        self.dataSource = ColumnDataSource(
            data=dict(ppm=self.ppmScale, data=self.pdata))

        self.reference = Reference(self.logger, self.dataSource)
        self.reference.create()
        self.reference.draw(self.plot)

        self.peakPicking = PeakPicking(self.logger, self.id, self.dic,
                                       self.udic, self.pdata, self.dataSource,
                                       self.reference)
        self.peakPicking.create()
        self.peakPicking.draw(self.plot)

        self.integration = Integration(self.logger, self.id, self.pdata,
                                       self.dataSource, self.reference)
        self.integration.create()
        self.integration.draw(self.plot)

        self.multipletAnalysis = MultipletAnalysis(self.logger, self.id,
                                                   self.dic, self.udic,
                                                   self.pdata, self.dataSource,
                                                   self.peakPicking,
                                                   self.integration,
                                                   self.reference)
        self.multipletAnalysis.create()
        self.multipletAnalysis.draw(self.plot)

        self.createMeasureJTool()

        self.plot.line('ppm', 'data', source=self.dataSource, line_width=2)

    # make ppm scale
    def makePPMScale(self):
        self.udic = ng.bruker.guess_udic(self.dic, self.pdata)
        uc = ng.fileiobase.uc_from_udic(self.udic)
        self.ppmScale = uc.ppm_scale()

    # create a new plot with a title and axis labels
    def newPlot(self):
        #Constants
        xr = Range1d(start=int(max(self.ppmScale) + 1),
                     end=int(min(self.ppmScale) - 1))

        self.plot = figure(x_axis_label='ppm',
                           x_range=xr,
                           toolbar=CustomToolbar(),
                           tools="pan,save,reset",
                           plot_width=self.WIDTH,
                           plot_height=self.HEIGHT)

        # Remove grid from plot
        self.plot.xgrid.grid_line_color = None
        self.plot.ygrid.grid_line_color = None

        horizontalBoxZoomTool = HorizontalBoxZoomTool()
        self.plot.add_tools(horizontalBoxZoomTool)
        self.plot.toolbar.active_drag = horizontalBoxZoomTool

        fixedWheelZoomTool = FixedWheelZoomTool(dimensions="height")
        self.plot.add_tools(fixedWheelZoomTool)
        self.plot.toolbar.active_scroll = fixedWheelZoomTool

        fixedZoomOutTool = FixedZoomOutTool(factor=0.4)
        self.plot.add_tools(fixedZoomOutTool)

        hoverTool = HoverTool(tooltips="($x, $y)")
        self.plot.add_tools(hoverTool)

    def createMeasureJTool(self):
        source = ColumnDataSource(data=dict(x=[], y=[]))
        label = Label(x=0,
                      y=0,
                      text="",
                      text_color="#000000",
                      render_mode="css")
        self.plot.add_layout(label)

        measureJTool = MeasureJTool(label=label,
                                    frequency=getFrequency(self.udic),
                                    id="measureJTool")
        self.plot.add_tools(measureJTool)