# 有些网站请求出现 403 等, 强制使用 tls1.2 发包的场景
#
# urllib 版本:
版本import urllib
import ssl
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
response = urllib.request.urlopen(url=urllib.request.Request(url=url, headers=headers),
data=data, context=context)
response = response.read().decode()
print(response)
#
# requests 版本:
版本import ssl
import requests
from requests.adapters import HTTPAdapter, PoolManager
class MyAdapter(HTTPAdapter):
# 重写 init_poolmanager 方法
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1_2)
s = requests.Session()
s.mount('https://', MyAdapter())
res = s.post(url, data=data, headers=headers)
text = res.text
# 在 scrapy 中强制使用 TLS1.2 的话需要在 settings.py 中设置以下代码:
# DOWNLOADER_CLIENT_TLS_METHOD = "TLSv1.2"
# 或者在你的 class 类设置 (原文此处拼错为 DOWNLOADER_CLIENT_TLS_METHO, 已更正):
# custom_settings = {"DOWNLOAD_DELAY": 0.5, "DOWNLOADER_CLIENT_TLS_METHOD": "TLSv1.2"}
# 参数有 TLS (默认), TLSv1.0, TLSv1.1, TLSv1.2, SSLv3
直接使用
httpx
请求import httpx
res = httpx.post(url, data=data, headers=headers)
text = res.text
# 转自: https://blog.csdn.net/weixin_43145985/article/details/119176590