Example #1
    def sort(self):

        # Showing text while image is loading and processing
        self.label_2.setText('Processing ...')

        # Opening dialog window to choose an image file
        # Giving name to the dialog window --> 'Choose Image to Open'
        # Specifying starting directory --> '.'
        # Showing only needed files to choose from --> '*.png *.jpg *.bmp'
        # noinspection PyCallByClass
        image_path = \
            QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Image to Open',
                                                  '.',
                                                  '*.png *.jpg *.bmp')

        # Variable 'image_path' now is a tuple that consists of two elements
        # First one is a full path to the chosen image file
        # Second one is a string with possible extensions

        # Checkpoint
        print(type(image_path))  # <class 'tuple'>
        print(image_path[0])  # /home/my_name/Downloads/example.png
        print(image_path[1])  # *.png *.jpg *.bmp

        # Slicing only needed full path
        image_path = image_path[0]  # /home/my_name/Downloads/example.png

        # Passing the chosen file's full path on for further processing
        sort(image_path, keywords_list)
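
In PyQt5, getOpenFileName returns ('', '') when the user cancels the dialog, so the slice above can yield an empty path. A minimal hedged sketch of a guard that could sit right after the slice (label_2 and the sort call are reused from the example, not verified):

        if not image_path:
            # User cancelled the dialog; nothing to process
            self.label_2.setText('No image chosen')
            return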
Example #2
def create_list():
    v.deleted.clear()
    for i in v.all_alph:
        i.clear()
    file = open(r"CitiesHard.txt").read(
    )  # Открытие файла со списком городов на чтение
    new = file.split("\n")  # Создание вектора всех городов
    s.sort(new)  # Сортировка вектора
Example #3
def parse_compose_file(my_path, yaml_file):
    global path
    path = os.path.basename(os.path.dirname(my_path))

    services = yaml_file['services']
    sorted_services = sorting.sort(services)

    parsed_services = []
    for d in sorted_services:
        for name, service in d.items():
            parsed_services.append(ServiceParser(name, service))

    networks = yaml_file['networks']
    for n in networks:
        if n == 'default':
            for k, v in networks[n].items():
                if k == 'external':  # add this network
                    commands.append("docker network create {}".format(v['name']))
                    for s in parsed_services:
                        s.docker_args['network'] = " --network={} ".format(v['name'])

    volumes = yaml_file['volumes']
    for v in volumes:
        commands.append("docker volume create {}_{}".format(path, v))

    for s in parsed_services:
        commands.append(s.write_run_command())
    return commands
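
A hedged usage sketch for this parser, assuming PyYAML is installed and that commands and sorting are module-level objects in the original project; the compose path is a placeholder:

import yaml

with open('docker-compose.yml') as f:
    compose = yaml.safe_load(f)

# my_path is only used to derive a project-name prefix for volume names
run_commands = parse_compose_file('/path/to/docker-compose.yml', compose)
for cmd in run_commands:
    print(cmd)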
Example #4
    def list(self, request):
        '''Lists comments'''

        queryset = sorting.sort(request, self.model.objects.all())
        objects = get_paginated(request, queryset)

        return Response(self.paginated_serializer(objects, context={'request': request}).data)
Example #5
    def news(self, request, pk=None):
        '''Retrieves a list of news items this user has created'''
        serializer = motsdits_serializers.PaginatedNewsSerializer

        queryset = sorting.sort(request, News.objects.filter(created_by=pk))
        news = get_paginated(request, queryset)

        return Response(serializer(news, context={'request': request}).data)
Example #6
    def favourites(self, request, pk=None):
        '''Retrieves a list of motsdits this user has favourited'''
        serializer = motsdits_serializers.PaginatedMotDitSerializer

        queryset = sorting.sort(request, MotDit.objects.filter(favourites=pk))
        motsdits = get_paginated(request, queryset)

        return Response(serializer(motsdits, context={'request': request}).data)
Example #7
    def photos(self, request, pk=None):
        '''Retrieves a list of photos related to this item'''

        serializer = motsdits_compact.PaginatedCompactPhotoSerializer

        queryset = sorting.sort(request, Photo.objects.filter(Q(motdit__what=pk) | Q(motdit__where=pk)))
        photos = get_paginated(request, queryset)
        return Response(serializer(photos, context={'request': request}).data)
Example #8
    def liked_stories(self, request, pk=None):
        '''Retrieves a list of stories this user has liked'''
        serializer = motsdits_serializers.PaginatedStorySerializer

        queryset = sorting.sort(request, Story.objects.filter(likes=pk))
        stories = get_paginated(request, queryset)

        return Response(serializer(stories, context={'request': request}).data)
Example #9
    def followers(self, request, pk=None):
        '''Retrieves a list of users who follow this user'''
        serializer = accounts_serializers.PaginatedUserSerializer

        queryset = sorting.sort(request, get_user_model().objects.filter(following=pk))
        users = get_paginated(request, queryset)

        return Response(serializer(users, context={'request': request}).data)
Example #10
    def news(self, request, pk=None):
        '''Retrieves a list of news related to this item'''
        serializer = motsdits_compact.PaginatedCompactNewsSerializer

        queryset = sorting.sort(request, News.objects.filter(motdit=pk))
        news_items = get_paginated(request, queryset)

        return Response(serializer(news_items, context={'request': request}).data)
Example #11
    def comments(self, request, pk=None):
        '''Retrieves a list of comments on this news item'''
        # @TODO: Add pagination
        serializer = motsdits_compact.PaginatedCompactCommentSerializer

        queryset = sorting.sort(request, Comment.objects.filter(news_item=pk))
        comments = get_paginated(request, queryset)

        return Response(serializer(comments, context={'request': request}).data)
Example #12
    def related(self, request, pk=None):
        '''Retrieves a list of motsdits related to this one'''

        serializer = motsdits_serializers.PaginatedMotDitSerializer

        queryset = sorting.sort(request, MotDit.objects.filter(Q(what=pk) | Q(where=pk)))
        motsdits = get_paginated(request, queryset)

        return Response(serializer(motsdits, context={'request': request}).data)
Example #14
def parse_compose_file(yaml_file):
    services = yaml_file['services']
    sorted_services = sorting.sort(services)

    parsed_services = []
    for d in sorted_services:
        for k, v in d.items():
            parsed_services.append(parse_service(k, v))

    commands = []
    for s in parsed_services:
        commands.append(write_run_command(s))
    return commands
Example #15
    def answers(self, request, pk=None):
        '''Lists answers to the question, or allows users to post an answer, either as a new motdit or as a motdit id'''

        # Load the relevant question
        question = Question.objects.get(pk=pk)

        # List all answers
        if request.method == 'GET':

            serializer = motsdits_compact.PaginatedCompactAnswerSerializer

            queryset = sorting.sort(request, Answer.objects.filter(question=question))
            objects = get_paginated(request, queryset)

            return Response(serializer(objects, context={'request': request}).data)

        # Create a new answer
        elif request.method == 'POST':

            serializer = motsdits_compact.CompactAnswerSerializer

            # Either supply a discrete motdit ID
            if isinstance(request.DATA['motdit'], int):
                motdit = MotDit.objects.get(pk=request.DATA['motdit'])
            else:

                try:
                    # Handle the response
                    response = create_motdit(request, request.DATA['motdit'])
                    motdit = MotDit.objects.get(pk=response.data['id'])

                except Exception as e:
                    return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)

            answer, created = Answer.objects.get_or_create(
                question=question,
                answer=motdit,
                created_by=request.user
            )

            if created:
                signals.question_answered.send(request.user.__class__, created_by=request.user, question=question, answer=answer, motdit=motdit)
                status_code = status.HTTP_201_CREATED
            else:
                status_code = status.HTTP_200_OK

            return Response(serializer(answer, context={'request': request}).data, status=status_code)
Example #16
def test_sort(ary):
    assert not is_sorted(ary)
    sorting.sort(ary)
    assert is_sorted(ary)
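
The test assumes an is_sorted helper and that sorting.sort works in place; a plausible sketch of the helper (an assumption, not the tested project's code):

def is_sorted(ary):
    # True when every adjacent pair is in non-decreasing order
    return all(ary[i] <= ary[i + 1] for i in range(len(ary) - 1))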
Example #17
File: index.py Project: deelin/osindex
def index_kw(kw_file, out_file=OUT_FILE):
    SEPARATOR = "|"
    headers = {"Accept": "application/vnd.github.v3.text-match+json"}

    kws = format_kw(kw_file)
    sorted_kws = sort([kw[0] for kw in kws])
    with open('/tmp/sorted.txt', 'w') as f:
        f.write('\n'.join(sorted_kws))
    with open(out_file, 'a') as f:
        for pair in kws:
            time.sleep(60)

            fields = []
            name = pair[0]
            corrected_name = correct_keys(pair[0])
            print "Starting for %s which was corrected to %s" % (name, corrected_name)
            fields = [name]
            if name in sorted_kws:
                fields.append(str(len(sorted_kws) - sorted_kws.index(name) + 1))
            else:
                fields.append('???')

            if name in SEARCH_KWS:
                terms = SEARCH_KWS[name]
                terms.insert(0, name)
                # Indeed
                indeed_count = aggregate_searchs(terms, fetch_indeed)
                fields.append(indeed_count)

                # SimplyHired
                simplyhired_count = aggregate_searchs(terms, fetch_simplyhired)
                fields.append(simplyhired_count)
            else:
                fields.append(fetch_indeed([corrected_name]))
                fields.append(fetch_simplyhired([corrected_name]))

            # StackOverflow
            fields.append(fetch_stackoverflow([name]))

            if pair[1]:
                # Github available, check github for stars etc.
                url = "https://api.github.com/repos/%s/%s" % pair[1:]
                x = requests.get(url, headers=headers)
                repo = json.loads(x.text)
                git_fields = ['network_count', 'stargazers_count', 'subscribers_count', 'html_url']
                # Append GitHub stats only when the response contains every field
                if all(field in repo for field in git_fields):
                    for field in git_fields:
                        fields.append(repo[field])

            line = SEPARATOR.join([unicode(x) for x in fields]).encode('utf-8')
            print "Writing " + line
            f.write(line)
            f.write('\n')
Example #18
    def test_bubble2(self):
        bubble = sort()
        listamal = [0, 1, 77, 3, 77, 4, 77, 3, 2, 5]
        listabien = bubble.bubbleSort(listamal)
        self.assertEqual(listabien, [0, 1, 2, 3, 3, 4, 5, 77, 77, 77])
Example #19
    def test_bubble1(self):
        bubble = sort()
        listamal = [66, 71, 16, 21, 79, 9, 40, 60, 5]
        listabien = bubble.bubbleSort(listamal)
        self.assertEqual(listabien, [5, 9, 16, 21, 40, 60, 66, 71, 79])
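
Examples #18 and #19 instantiate a class named sort with a bubbleSort method; one plausible implementation that would satisfy both assertions (a sketch, not the tested project's code):

class sort:
    def bubbleSort(self, data):
        # Classic bubble sort on a copy: repeatedly swap adjacent
        # out-of-order pairs until the list is ordered
        items = list(data)
        for n in range(len(items) - 1, 0, -1):
            for i in range(n):
                if items[i] > items[i + 1]:
                    items[i], items[i + 1] = items[i + 1], items[i]
        return items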
Example #20
f_type = form.getvalue('problem_type')
f_algo = form.getvalue('algorithm')
f_size = form.getvalue('problem_size')

if not f_type:
    # Print HTML form for choosing the problem type
    print htmlforms['problem_type']

elif not f_algo:
    if f_type in htmlforms:
        # Print HTML form for choosing the algorithm
        print htmlforms[f_type]
    else:
        print 'The selected problem type is not supported.'

else:
    if f_type == 'sort':
        # Start the HTML table
        print '<table><tr><td>Algorithm:</td><td>Time:</td></tr>'
        for result in sorting.sort(f_algo, int(f_size)):
            # Unpack the result
            name, time = result
            # Make the name a bit prettier
            name = name.replace('_', ' ').title()
            # Print an HTML row
            print '<tr><td>%s:</td><td>%.4f</td></tr>' % (name, time)
        # End the HTML table
        print '</table>'

# Print HTML footer
print '''</body></html>'''
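
The loop above expects sorting.sort(f_algo, int(f_size)) to yield (name, seconds) pairs; a hedged sketch of such a benchmark harness (the random input and the built-in sorted as a stand-in for the named algorithm are assumptions):

import random
import time

def sort(algorithm, size):
    # Hypothetical benchmark: time a sort of random input and yield
    # (algorithm_name, elapsed_seconds) pairs as the CGI loop expects
    data = [random.random() for _ in range(size)]
    start = time.time()
    sorted(data)  # stand-in for dispatching to the named algorithm
    yield (algorithm, time.time() - start)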
Example #21
from sorting import sort
import random as rd

l = [rd.randint(1, 10) for _ in range(10)]

print(sort(l, 'merge'))
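
Here sort takes the algorithm name as a second argument; a minimal sketch of such a dispatcher with a merge sort branch (the signature matches the call above, the body is an assumption):

def sort(data, algorithm):
    # Dispatch on the algorithm name; only 'merge' is sketched here
    if algorithm == 'merge':
        if len(data) <= 1:
            return list(data)
        mid = len(data) // 2
        left = sort(data[:mid], 'merge')
        right = sort(data[mid:], 'merge')
        merged = []
        while left and right:
            merged.append(left.pop(0) if left[0] <= right[0] else right.pop(0))
        return merged + left + right
    raise ValueError('unknown algorithm: %s' % algorithm)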
Example #22
    def testArray3(self):
        array3 = [5, 0, 15, 25, 21, 35, 40, 25, 6, 9]
        bubble3 = sort().BubbleSort(array3)
        self.assertEqual(array3, bubble3)
Example #23
    def testArray2(self):
        array2 = [0, 2, 23, 4, 2, 8, 1, 25, 6, 9]
        bubble2 = sort().BubbleSort(array2)
        self.assertEqual(array2, bubble2)
Example #24
def main(year):

    # NBA season

    # URL of the page we will scrape
    url = "https://www.basketball-reference.com/leagues/NBA_{}_standings.html".format(
        year)

    # HTML from URL
    html = get(url)
    soup = BeautifulSoup(html.text, "html.parser")

    bstWins = Tree()
    bstLoss = Tree()
    bstWin_loss_pct = Tree()
    bstGb = Tree()
    bstPts_per_g = Tree()
    bstOpp_pts_per_g = Tree()
    bstSrs = Tree()

    teamsList = []
    winList = []
    lossList = []
    win_loss_pctList = []
    gbList = []
    pts_per_gList = []
    opp_pts_per_gList = []
    srsList = []

    # Scrapes the Eastern Conference table
    easternTable = soup.findAll('table', attrs={"id": "confs_standings_E"})

    for tablerows in easternTable:
        tableBody = tablerows.find_all('tbody')
        for item in tableBody:
            href = item.find_all("a", href=True)

            wins = item.find_all('td', attrs={"data-stat": "wins"})
            loss = item.find_all('td', attrs={"data-stat": "losses"})
            win_loss_pct = item.find_all('td',
                                         attrs={"data-stat": "win_loss_pct"})
            gb = item.find_all('td', attrs={"data-stat": "gb"})
            pts_per_g = item.find_all('td', attrs={"data-stat": "pts_per_g"})
            opp_pts_per_g = item.find_all('td',
                                          attrs={"data-stat": "opp_pts_per_g"})
            srs = item.find_all('td', attrs={"data-stat": "srs"})

            cont = 0
            # Adds Eastern Conference Data to BST
            for name in href:
                teamName = name.text
                teamsList.append(teamName)

                winsItem = float(wins[cont].text)
                winList.append(winsItem)
                bstWins.insert(teamName, winsItem)

                lossItem = float(loss[cont].text)
                lossList.append(lossItem)
                bstLoss.insert(teamName, lossItem)

                win_loss_pctItem = float(win_loss_pct[cont].text)
                win_loss_pctList.append(win_loss_pctItem)
                bstWin_loss_pct.insert(teamName, win_loss_pctItem)

                gbItem = float(gb[cont].text.replace('—', '0'))
                gbList.append(gbItem)
                bstGb.insert(teamName, gbItem)

                pts_per_gItem = float(pts_per_g[cont].text)
                pts_per_gList.append(pts_per_gItem)
                bstPts_per_g.insert(teamName, pts_per_gItem)

                opp_pts_per_gItem = float(opp_pts_per_g[cont].text)
                opp_pts_per_gList.append(opp_pts_per_gItem)
                bstOpp_pts_per_g.insert(teamName, opp_pts_per_gItem)

                srsItem = float(srs[cont].text)
                srsList.append(srsItem)
                bstSrs.insert(teamName, srsItem)

                cont += 1

    # Scrapes the Western Conference table
    westernTable = soup.findAll('table', attrs={"id": "confs_standings_W"})
    for tablerows in westernTable:
        tableBody = tablerows.find_all('tbody')
        for item in tableBody:
            href = item.find_all("a", href=True)

            wwins = item.find_all('td', attrs={"data-stat": "wins"})
            wloss = item.find_all('td', attrs={"data-stat": "losses"})
            wwin_loss_pct = item.find_all('td',
                                          attrs={"data-stat": "win_loss_pct"})
            wgb = item.find_all('td', attrs={"data-stat": "gb"})
            wpts_per_g = item.find_all('td', attrs={"data-stat": "pts_per_g"})
            wopp_pts_per_g = item.find_all(
                'td', attrs={"data-stat": "opp_pts_per_g"})
            wsrs = item.find_all('td', attrs={"data-stat": "srs"})

            wcont = 0
            # Adds Western Conference Data to BST
            for name in href:
                teamName = name.text
                teamsList.append(teamName)

                winsItem = float(wwins[wcont].text)
                winList.append(winsItem)
                bstWins.insert(teamName, winsItem)

                lossItem = float(wloss[wcont].text)
                lossList.append(lossItem)
                bstLoss.insert(teamName, lossItem)

                win_loss_pctItem = float(wwin_loss_pct[wcont].text)
                win_loss_pctList.append(win_loss_pctItem)
                bstWin_loss_pct.insert(teamName, win_loss_pctItem)

                gbItem = float(wgb[wcont].text.replace('—', '0'))
                gbList.append(gbItem)
                bstGb.insert(teamName, gbItem)

                pts_per_gItem = float(wpts_per_g[wcont].text)
                pts_per_gList.append(pts_per_gItem)
                bstPts_per_g.insert(teamName, pts_per_gItem)

                opp_pts_per_gItem = float(wopp_pts_per_g[wcont].text)
                opp_pts_per_gList.append(opp_pts_per_gItem)
                bstOpp_pts_per_g.insert(teamName, opp_pts_per_gItem)

                srsItem = float(wsrs[wcont].text)
                srsList.append(srsItem)
                bstSrs.insert(teamName, srsItem)

                wcont += 1

    # Builds the list of mins and maxes
    minWins = bstWins.find_min()
    maxWins = bstWins.find_max()

    minLoss = bstLoss.find_min()
    maxLoss = bstLoss.find_max()

    minWin_loss_pct = bstWin_loss_pct.find_min()
    maxWin_loss_pct = bstWin_loss_pct.find_max()

    minGb = bstGb.find_min()
    maxGb = bstGb.find_max()

    minPts_per_g = bstPts_per_g.find_min()
    maxPts_per_g = bstPts_per_g.find_max()

    minOpp_pts_per_g = bstOpp_pts_per_g.find_min()
    maxOpp_pts_per_g = bstOpp_pts_per_g.find_max()

    minSrs = bstSrs.find_min()
    maxSrs = bstSrs.find_max()

    minsAndMax = [
        minWins, maxWins, minLoss, maxLoss, minWin_loss_pct, maxWin_loss_pct,
        minGb, maxGb, minPts_per_g, maxPts_per_g, minOpp_pts_per_g,
        maxOpp_pts_per_g, minSrs, maxSrs
    ]

    # Builds a data frame from the scraped data
    data = {
        'Teams': teamsList,
        'Wins': winList,
        'Loss': lossList,
        'Win-Loss Percentage': win_loss_pctList,
        'Games Behind': gbList,
        'Points Per Game': pts_per_gList,
        'Opponent Points Per Game': opp_pts_per_gList,
        'Simple Rating System': srsList
    }

    # ['wins', 'loss', 'win_loss_pct', 'gb', 'pts_per_g', 'opp_pts_per_g', 'srs']
    df = DataFrame(data,
                   columns=[
                       'Teams', 'Wins', 'Loss', 'Win-Loss Percentage',
                       'Games Behind', 'Points Per Game',
                       'Opponent Points Per Game', 'Simple Rating System'
                   ])
    df.to_csv(r'data\dataframe_NBA.csv', index=False)

    # SORTING
    sortedTeamsList = sort("ALL", teamsList)
    sortedWinList = sort("ALL", winList)
    sortedWinLossPCT = sort("ALL", win_loss_pctList)

    # SEARCHING

    print(sortedWinLossPCT)

    return minsAndMax, year, df, data, sortedTeamsList, sortedWinList, sortedWinLossPCT
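
The Tree class above is only exercised through insert(name, value), find_min() and find_max(); a minimal BST sketch consistent with those calls (the (name, value) return shape is an assumption):

class Tree:
    def __init__(self):
        self.root = None

    def insert(self, name, value):
        # Nodes are keyed on the numeric value and carry the team name
        node = {'name': name, 'value': value, 'left': None, 'right': None}
        if self.root is None:
            self.root = node
            return
        cur = self.root
        while True:
            side = 'left' if value < cur['value'] else 'right'
            if cur[side] is None:
                cur[side] = node
                return
            cur = cur[side]

    def find_min(self):
        # The smallest value sits at the leftmost node
        cur = self.root
        while cur['left'] is not None:
            cur = cur['left']
        return (cur['name'], cur['value'])

    def find_max(self):
        # The largest value sits at the rightmost node
        cur = self.root
        while cur['right'] is not None:
            cur = cur['right']
        return (cur['name'], cur['value'])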
Example #25
    def testArray1(self):
        array1 = [36, 71, 16, 21, 73, 9, 0, 40, 66, 7]
        bubble1 = sort().BubbleSort(array1)
        self.assertEqual(array1, bubble1)
Example #26
    def test1_check_sort(self):
        res = list(sorting.sort([], [], [5]))
        ass = [5]
        self.assertEqual(res, ass)
Example #27
    def test2_check_sort(self):
        res = list(sorting.sort([1000], [2.2, 3], [-1]))
        ass = [-1, 2.2, 3, 1000]
        self.assertEqual(res, ass)
Example #28
    def test3_check_sort(self):
        res = list(sorting.sort([1], [1, 1], [1, 5]))
        ass = [1, 1, 1, 1, 5]
        self.assertEqual(res, ass)
Example #29
    def test4_check_sort(self):
        res = list(sorting.sort([5], [3, 5], [4, 5]))
        ass = [3, 4, 5, 5, 5]
        self.assertEqual(res, ass)
Example #30
    def test0_check_sort(self):
        res = list(sorting.sort([1], [2, 3], [4, 5]))
        ass = [1, 2, 3, 4, 5]
        self.assertEqual(res, ass)
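
Tests #26 through #30 pass several already-sorted lists and wrap the result in list(), which suggests sorting.sort lazily merges its arguments; a hedged sketch using heapq.merge (an assumption about the actual implementation):

import heapq

def sort(*lists):
    # Lazily merge any number of individually sorted input lists;
    # list(sort(...)) then yields one fully sorted sequence
    return heapq.merge(*lists)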
Example #31
    for photos in photos_tuples:
        out.append(photos[0])
    return out


#datasets = os.listdir("data")
datasets = ["e_shiny_selfies.txt"]

for dataset in datasets:
    print("Started data set {}".format(dataset))
    photos = file_io.read_input(dataset)

    vertical = list(filter(lambda item: item[1] == "V", photos))
    horizontal = filter(lambda item: item[1] == "H", photos)
    horizontal = list(map(lambda item: ((item[0], ), ) + item[1:], horizontal))
    averageH = pairing.findHAverage(horizontal)

    vertical = pairing.pair(vertical, averageH)

    photos = sorting.sort(vertical + horizontal)

    #score.check_nearby(photos)

    slides = convert_to_slides(photos)

    #random.shuffle(slides)
    print("Started write")
    file_io.write_output(dataset, slides)
    print("finished dataset " + dataset)

print("done")
Example #32
print(dfPosts)

df_maintags.to_csv("Tags.csv")
Tags = pd.read_csv("Tags.csv")
Posts = pd.read_csv("Posts.csv")
df_result = Tags.join(Posts)
df_result = df_result.drop(columns=["Unnamed: 0"])
df_result = df_result.sort_values(by=["posts"])

##########create folder and change directory################
try:
    os.makedirs(tagword)
except FileExistsError:
    print("folder exists")

os.chdir(tagword)
######################################################################

print(df_result)
name = "#" + tagword + ".csv"
df_result.to_csv(name, index=False)

sort(name)
delete_emptycols("sorted.csv")

gen("sorted1.csv")
os.chdir("..")
path = os.path.join(tagword, "hashtags.txt")
tagword = tagword + ".txt"
upload(tagword, path)