Answer

The two approaches differ slightly. With selenium in particular, watch the order of the options and the parameter settings, otherwise the proxy will not take effect.

requests

import requests

# Define the proxy (requests picks the entry matching the URL scheme)
proxies = {
    'http': 'http://proxy-ip-or-domain:port',
    'https': 'http://proxy-ip-or-domain:port',  # needed for https:// URLs
}

# Define request headers (optional)
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/86.0.4240.111 Safari/537.36'
}

# Send a GET request
url = 'http://example.com'
response = requests.get(url, proxies=proxies, headers=headers)

# Print the response body
print(response.text)
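
To verify the traffic really goes through the proxy, a minimal check (the proxy address is a placeholder, as above; httpbin.org/ip is a public service that echoes the IP a request arrives from):

import requests

proxies = {
    'http': 'http://proxy-ip-or-domain:port',
    'https': 'http://proxy-ip-or-domain:port',
}

try:
    # A working proxy prints the proxy's exit IP here, not your own
    resp = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=10)
    print(resp.json())
except requests.exceptions.ProxyError as exc:
    print('Proxy unreachable:', exc)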

selenium

#!/usr/local/python3/bin/python3
# -*- coding: utf-8 -*-
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
import html

chromeOptions = webdriver.ChromeOptions()

# Add the proxy option
chromeOptions.add_argument('--proxy-server=http://proxy-ip-or-domain:port')
# Do not enable remote-debugging-port=9222, or the proxy stops working;
# --headless must also come after the proxy option
chromeOptions.add_argument("--headless")
#chromeOptions.add_argument("--remote-debugging-port=9222")
chromeOptions.add_argument('--no-sandbox')
# Selenium 3 style; Selenium 4 passes the driver path via a Service object
browser = webdriver.Chrome('/usr/bin/chromedriver', options=chromeOptions)

browser.get("http://example.com")               # Open the target site

try:
    sleep(1)  # give the page a moment to render

    html_content = browser.page_source

    # Parse the page with BeautifulSoup if needed
    #soup = BeautifulSoup(html_content, 'html.parser')
    print(html.escape(html_content))

finally:
    # Close the browser
    browser.quit()
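
To confirm the option order worked and the proxy is actually in effect, a minimal sketch (same placeholder proxy; httpbin.org/ip echoes the caller's IP, so the page should show the proxy's exit IP rather than the machine's own):

from selenium import webdriver
from selenium.webdriver.common.by import By

chromeOptions = webdriver.ChromeOptions()
chromeOptions.add_argument('--proxy-server=http://proxy-ip-or-domain:port')
chromeOptions.add_argument('--headless')
chromeOptions.add_argument('--no-sandbox')

browser = webdriver.Chrome('/usr/bin/chromedriver', options=chromeOptions)
try:
    browser.get('http://httpbin.org/ip')
    # The page body is JSON like {"origin": "<exit IP>"}
    print(browser.find_element(By.TAG_NAME, 'body').text)
finally:
    browser.quit()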
	
