How to fix AttributeError: 'NoneType' object has no attribute 'text' while looping

I'm a beginner, and the answers on this forum have been invaluable. I'm using Python 3 and Beautiful Soup to scrape (non-table) data from multiple pages of the same website by looping over the page number. It works, but after the first iteration I keep getting AttributeError: 'NoneType' object has no attribute 'text'.

Here is the code I have tried so far:

import requests
from bs4 import BeautifulSoup
import csv
import lxml

# Lists to store the scraped data in
addresses = []
geographies = []
rents = []
units = []
availabilities = []
# Scraping all pages
pages_url = requests.get('https://www.rent.com/new-york/tuckahoe-apartments')
pages_soup = BeautifulSoup(pages_url.text, 'html.parser')
list_nums = pages_soup.find('div', class_='_1y05u').text
print(list_nums)
pages = [str(i) for i in range(1,8)]
for page in pages:
    response = requests.get('https://www.rent.com/new-york/tuckahoe-apartments?page=' + page).text
    html_soup = BeautifulSoup(response, 'lxml')

    # Extract data from individual listing containers
    listing_containers = html_soup.find_all('div', class_='_3PdAH')
    print(type(listing_containers))
    print(len(listing_containers))

    for container in listing_containers:
        address = container.a.text
        addresses.append(address)
        geography = container.find('div', class_='_1dhrl').text
        geographies.append(geography)
        rent = container.find('div', class_='_3e12V').text
        rents.append(rent)
        unit = container.find('div', class_='_2tApa').text
        units.append(unit)
        availability = container.find('div', class_='_2P6xE').text
        availabilities.append(availability)
        import pandas as pd
        test_df = pd.DataFrame({'Street' : addresses,
                                'City-State-Zip' : geographies,
                                'Rent' : rents,
                                'BR/BA' : units,
                                'Units Available' : availabilities
        })
        print(test_df)

Here is the output:

240 Properties
<class 'bs4.element.ResultSet'>
30
                     Street                      City-State-Zip     Rent                 BR/BA    Units Available
0  Quarry Place at Tuckahoe  64 Midland PlaceTuckahoe, NY 10707  $2,490+  1–2 Beds • 1–2 Baths  2 Units Available
Traceback (most recent call last):
  File "renttucktabletest.py", line 60, in <module>
    availability = container.find('div', class_='_2P6xE').text
AttributeError: 'NoneType' object has no attribute 'text'

The result I'm looking for is all 240 listings in a pandas dataframe, exactly like the first iteration shown in the output above. Can anyone help fix this error? It would be greatly appreciated. Thanks!

As noted, the problem is that some containers are missing some of the div elements, e.g. there is no "units" or "availability" information for certain listings.

One way to handle this is with if-else statements: append the element's text only if the element exists, otherwise append a NaN value. Like this:

import requests
import numpy as np
from bs4 import BeautifulSoup
import csv
import lxml

# Lists to store the scraped data in
addresses = []
geographies = []
rents = []
units = []
availabilities = []
# Scraping all pages
pages_url = requests.get('https://www.rent.com/new-york/tuckahoe-apartments')
pages_soup = BeautifulSoup(pages_url.text, 'html.parser')
list_nums = pages_soup.find('div', class_='_1y05u').text
print(list_nums)
pages = [str(i) for i in range(1,8)]
for page in pages:
    response = requests.get('https://www.rent.com/new-york/tuckahoe-apartments?page=' + page).text
    html_soup = BeautifulSoup(response, 'lxml')

    # Extract data from individual listing containers
    listing_containers = html_soup.find_all('div', class_='_3PdAH')
    print(type(listing_containers))
    print(len(listing_containers))

    for container in listing_containers:
        address = container.a
        if address:
            addresses.append(address.text)
        else:
            addresses.append(np.nan)
        geography = container.find('div', class_='_1dhrl')
        if geography:
            geographies.append(geography.text)
        else:
            geographies.append(np.nan)
        rent = container.find('div', class_='_3e12V')
        if rent:
            rents.append(rent.text)
        else:
            rents.append(np.nan)
        unit = container.find('div', class_='_2tApa')
        if unit:
            units.append(unit.text)
        else:
            units.append(np.nan)
        availability = container.find('div', class_='_2P6xE')
        if availability:
            availabilities.append(availability.text)
        else:
            availabilities.append(np.nan)
import pandas as pd
test_df = pd.DataFrame({'Street' : addresses,
                        'City-State-Zip' : geographies,
                        'Rent' : rents,
                        'BR/BA' : units,
                        'Units Available' : availabilities
})
print(test_df)
                     Street                      City-State-Zip     Rent  
0  Quarry Place at Tuckahoe  64 Midland PlaceTuckahoe, NY 10707  $2,490+   
1     address not disclosed                  Tuckahoe, NY 10707   $2,510   
2     address not disclosed                  Tuckahoe, NY 10707   $4,145   
3        60 Washington St 1  60 Washington StTuckahoe, NY 10707   $3,500   
4        269 Columbus Ave 5  269 Columbus AveTuckahoe, NY 10707   $2,700   
                  BR/BA    Units Available  
0  1–2 Beds • 1–2 Baths  2 Units Available  
1        1 Bed • 1 Bath                NaN  
2       2 Beds • 2 Bath                NaN  
3       3 Beds • 2 Bath                NaN  
4       2 Beds • 1 Bath                NaN 
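The same idea can be written more compactly with a small helper that returns the tag's text when the tag exists and NaN otherwise. This is just a sketch of an alternative to the if-else blocks above; the text_or_nan helper is mine, not part of the original answer:

import numpy as np

def text_or_nan(element):
    # .find() (and .a) return None when nothing matches, so guard before .text
    return element.text if element is not None else np.nan

for container in listing_containers:
    addresses.append(text_or_nan(container.a))
    geographies.append(text_or_nan(container.find('div', class_='_1dhrl')))
    rents.append(text_or_nan(container.find('div', class_='_3e12V')))
    units.append(text_or_nan(container.find('div', class_='_2tApa')))
    availabilities.append(text_or_nan(container.find('div', class_='_2P6xE')))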

The problem goes away if you extract the information from a script tag and treat it as JSON. Fields that raise errors when you chase class names come back from the JSON as None or 0 instead.

import requests
import json
from bs4 import BeautifulSoup as bs
import re
import pandas as pd
def add_records(url, s):
    # Use the shared requests.Session that is passed in
    res = s.get(url)
    soup = bs(res.content, 'lxml')
    # The page stores its listing data as JSON assigned to a JS variable
    r = re.compile(r'window\.__APPLICATION_CONTEXT__ = (.*)')
    data = soup.find('script', text=r).text
    script = r.findall(data)[0]
    items = json.loads(script)['store']['listings']['listings']
    for item in items:   
        street = item['address']
        geography = ', '.join([item['city'], item['state'], item['zipCode']])
        rent = item['aggregates']['prices']['low']
        BR_BA = 'beds: ' + str(item['aggregates']['beds']['low'])  + ' , ' + 'baths: ' + str(item['aggregates']['baths']['low'])
        units = item['aggregates']['totalAvailable']
        listingId = item['listingId']
        url = base_url + item['listingSeoPath']
        # all_info = item
        record = {'Street' : street,
                  'Geography' : geography,
                  'Rent' : rent,
                  'BR/BA' : BR_BA,
                  'Units Available' : units,
                  'ListingId' : listingId,
                  'Url' : url}
        results.append(record)
url = 'https://www.rent.com/new-york/tuckahoe-apartments?page={}'
base_url = 'https://www.rent.com/'
results = []
with requests.Session() as s:
    for page in range(1, 9):
        add_records(url.format(page), s)
df = pd.DataFrame(results, columns = [ 'Street', 'Geography', 'Rent', 'BR/BA', 'Units Available', 'ListingId', 'Url'])
print(df)
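This approach assumes the embedded JSON keeps the same key structure on every page. If you want to guard against a key such as 'aggregates' ever being absent from an item, dict.get with a default avoids a KeyError; a sketch of that defensive variant (the .get() chaining is my addition, not part of the original answer):

for item in items:
    aggregates = item.get('aggregates', {})
    rent = aggregates.get('prices', {}).get('low')   # None instead of KeyError when missing
    units = aggregates.get('totalAvailable')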

Here is another way to achieve the same thing.

import pandas
import requests
from bs4 import BeautifulSoup
urls = ['https://www.rent.com/new-york/tuckahoe-apartments?page={}'.format(page) for page in range(1,9)]
def get_content(links):
    for url in links:
        res = requests.get(url)
        soup = BeautifulSoup(res.text, 'lxml')
        for items in soup.select("._3PdAH"):
            d = {}
            d['address'] = items.select_one("[data-tid='property-title']").text
            try:
                d['geographies'] = items.select_one("[data-tid='listing-info-address']").text
            except AttributeError:
                d['geographies'] = ""
            try:
                d['rent'] = items.select_one("[data-tid='price']").text
            except AttributeError:
                d['rent'] = ""
            try:
                d['units'] = items.select_one("[data-tid='beds-baths']").text
            except AttributeError:
                d['units'] = ""
            try:
                d['availabilities'] = items.select_one("[data-tid='property-unitAvailText']").text
            except AttributeError:
                d['availabilities'] = ""
            dataframe.append(d)
    return dataframe
if __name__ == '__main__':
    dataframe = []
    item = get_content(urls)
    df = pandas.DataFrame(item)
    df.to_csv("output.csv",index=False)
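Note that this version fills missing fields with empty strings rather than NaN. If you prefer the NaN convention of the first answer, one extra line after building the dataframe normalizes them (a sketch using numpy):

import numpy as np
# Turn the empty-string placeholders into proper missing values
df = df.replace("", np.nan)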
