Scraping data from multiple websites into one table



I'm practicing scraping websites and feeding the information into a table. I imported the links, but I seem to be getting an error message based on the URL.

from bs4 import BeautifulSoup
import requests
import pandas as pd
eastern_basketball_players = {'mens_basketball':'https://www.espn.com/nba/seasonleaders/_/league/east'}
def scraping_processing(season_leaders, url):
    
    player = []
    teamcity = []
    games_played = []
    minutes_per_game = []
    points = []
    espn_score = []
    
    for link in url:
        
        page = requests.get(link)
        
        soup = BeautifulSoup(page.content, 'html.parser')
        
        raw_names = soup.find_all('td', class_ = 'Player')
        
        teams = soup.find_all('td', class_ = 'Team')
        
        games = soup.find_all('td', class_ = 'Games Played')
        
        minutes = soup.find_all('td', class_ = 'Minutes Per Game')
        
        pts = soup.find_all('td', class_ = 'Points Per Game')
        
        espnscores = soup.find_all('td', class_ = 'EPSN Rating')
        
        for raw_name in raw_names:
            player.append(raw_name.get_text().strip())
            
        for team in teams:
            teamcity.append(team.get_text().strip())
            
        for game in games:
            games_played.append(game.get_text().strip())
            
        for minute in minutes:
            minutes_per_game.append(minute.get_text().strip())
            
        for pt in pts:
            points.append(pt.get_text().strip())
            
        for espnscore in espnscores:
            espn_score.append(espnscore.get_text().strip())
     
    filename = season_leaders + '.csv'
    df = pd.DataFrame()
    df['Names'] = player
    df['Teams'] = teamcity
    df['Games Played'] = games_played
    df['Minutes Per Game'] = minutes_per_game
    df['Points'] = points
    df['ESPN Scores'] = espn_score
    df.to_csv(filename, index = False)
    
east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])

The error message I'm getting is:

MissingSchema                             Traceback (most recent call last)
<ipython-input-49-ca254e49e854> in <module>
----> 1 east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
<ipython-input-48-2f1a85c4b240> in scraping_processing(season_leaders, url)
     10     for link in url:
     11 
---> 12         page = requests.get(link)
     13 
     14         soup = BeautifulSoup(page.content, 'html.parser')
~\anaconda3\Python\lib\site-packages\requests\api.py in get(url, params, **kwargs)
     74 
     75     kwargs.setdefault('allow_redirects', True)
---> 76     return request('get', url, params=params, **kwargs)
     77 
     78 
~\anaconda3\Python\lib\site-packages\requests\api.py in request(method, url, **kwargs)
     59     # cases, and look like a memory leak in others.
     60     with sessions.Session() as session:
---> 61         return session.request(method=method, url=url, **kwargs)
     62 
     63 
~\anaconda3\Python\lib\site-packages\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
    526             hooks=hooks,
    527         )
--> 528         prep = self.prepare_request(req)
    529 
    530         proxies = proxies or {}
~\anaconda3\Python\lib\site-packages\requests\sessions.py in prepare_request(self, request)
    454 
    455         p = PreparedRequest()
--> 456         p.prepare(
    457             method=request.method.upper(),
    458             url=request.url,
~\anaconda3\Python\lib\site-packages\requests\models.py in prepare(self, method, url, headers, files, data, params, auth, cookies, hooks, json)
    314 
    315         self.prepare_method(method)
--> 316         self.prepare_url(url, params)
    317         self.prepare_headers(headers)
    318         self.prepare_cookies(cookies)
~\anaconda3\Python\lib\site-packages\requests\models.py in prepare_url(self, url, params)
    388             error = error.format(to_native_string(url, 'utf8'))
    389 
--> 390             raise MissingSchema(error)
    391 
    392         if not host:
MissingSchema: Invalid URL 'h': No schema supplied. Perhaps you meant http://h?

I double-checked the URL and ran the code again, but I'm still getting this error message. Can anyone help me?

You either need to get rid of the loop, or your url needs to be in a list.

But more importantly, you need to go back and review HTML and how to identify tags and attributes. There are no <td> tags with class="Player", class="Team", etc. in this HTML.

Take a look at this code, then look at the HTML, and see how it traverses it.

from bs4 import BeautifulSoup
import requests
import pandas as pd
# Note: the URL is now inside a list, so the loop below iterates over links, not characters
eastern_basketball_players = {'mens_basketball': ['https://www.espn.com/nba/seasonleaders/_/league/east']}

def scraping_processing(season_leaders, url):

    rows = []

    for link in url:
        page = requests.get(link)

        soup = BeautifulSoup(page.content, 'html.parser')

        # The column headers live in the <tr class="colhead"> row, not in classed <td> cells
        colNames = soup.find('tr', class_='colhead')
        colNames = [x.text for x in colNames.find_all('td')]

        # Every data row is a plain <tr>; skip the repeated header ('colhead')
        # and section-title ('stathead') rows
        trs = soup.find_all('tr')
        for row in trs:
            if row['class'][0] not in ['colhead', 'stathead']:
                rows.append([x.text for x in row.find_all('td')])

    filename = season_leaders + '.csv'
    df = pd.DataFrame(rows, columns=colNames)
    df.to_csv(filename, index=False)
    
east_mens_basketball_df = scraping_processing('mens_basketball', eastern_basketball_players['mens_basketball'])
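
If you want to check for yourself which class names the page actually uses (and guard against any <tr> that carries no class attribute at all, which would make row['class'] raise a KeyError), a minimal diagnostic sketch along these lines can help. The URL is the one from the question; everything else here is just illustration:

from bs4 import BeautifulSoup
import requests

page = requests.get('https://www.espn.com/nba/seasonleaders/_/league/east')
soup = BeautifulSoup(page.content, 'html.parser')

# Collect every distinct class name carried by a <tr> element;
# row.get('class') returns None when a row has no class attribute
tr_classes = {cls for row in soup.find_all('tr')
              for cls in (row.get('class') or [])}
print(tr_classes)  # expect something like {'stathead', 'colhead', 'oddrow', 'evenrow'}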

You are passing https://www.espn.com/nba/seasonleaders/_/league/east as the url, but you then access it with a for loop, e.g. for link in url: — so you end up with link = 'h',

which is invalid... don't use a for loop here.
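
To make the failure mode concrete: iterating over a plain Python string yields one character at a time, which is exactly why requests was handed 'h' as a URL. A minimal sketch (the url variable is just for illustration):

url = 'https://www.espn.com/nba/seasonleaders/_/league/east'

# Iterating over a string yields its individual characters
for link in url:
    print(link)  # prints 'h' on the first pass -- this is what requests.get received
    break

# Wrapping the string in a list yields the whole URL instead
for link in [url]:
    print(link)  # prints the full URL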
