Basic Information
Source name: Python scraper for images from the Meitulu website
Source size: 3.64 KB
File format: .py
Development language: Python
Updated: 2019-03-23
Source Introduction
The script below walks a meitulu.com category page with requests and BeautifulSoup, creates one folder per photo album, and downloads every image across all of the album's pages.
import os

import chardet
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    'Accept-Encoding': 'gzip',
    "Referer": "https://www.meitulu.com/"
}  # request headers: browser-like User-Agent plus a same-site Referer
path="jpg"
os.chdir(path)
url = 'https://www.meitulu.com/t/qingdouke/'  # 青豆客
# Other category pages on the same site that the script can be pointed at:
# 'https://www.meitulu.com/t/youwuguan/'        尤物馆
# 'https://www.meitulu.com/t/girlt/'            果团网
# 'https://www.meitulu.com/t/mistar/'           魅妍社
# 'https://www.meitulu.com/t/1088/'             星颜社
# 'https://www.meitulu.com/t/ishow/'            iSHOW爱秀
# 'https://www.meitulu.com/t/huayan/'           花の颜
# 'https://www.meitulu.com/t/xingleyuan/'       星乐园
# 'https://www.meitulu.com/t/tukmo/'
# 'https://www.meitulu.com/t/aiss/'
# 'https://www.meitulu.com/t/miitao/'
# 'https://www.meitulu.com/t/uxing/'
# 'https://www.meitulu.com/t/taste/'
# 'https://www.meitulu.com/t/micat/'
# 'https://www.meitulu.com/t/candy/'
# 'https://www.meitulu.com/t/yunvlang/'
# 'https://www.meitulu.com/t/youmihui/'
# 'https://www.meitulu.com/t/1113/'
# 'https://www.meitulu.com/t/1209/'
# 'https://www.meitulu.com/t/imiss/'
# 'https://www.meitulu.com/t/yingsihui-wings/'
# 'https://www.meitulu.com/t/dianannan/'        嗲囡囡
# Fetch the category index page; detect its encoding with chardet before parsing.
response1 = requests.get(url, headers=headers, allow_redirects=False)
e = chardet.detect(response1.content)['encoding']
response1.encoding = e
html_soup1 = BeautifulSoup(response1.text, 'lxml')
all_a = html_soup1.find('ul', class_='img').find_all('li')  # one <li> per album
count = 0
for l in all_a:
    count = count + 1
    a = l.a
    if a.find('img') is None:  # skip <li> entries that are not album covers
        continue
    href = a["href"]
    print(href)
    href2 = href.replace('.html', '_')  # base for the album's paginated URLs
    response2 = requests.get(href, headers=headers)
    e = chardet.detect(response2.content)['encoding']
    response2.encoding = e
    html_soup2 = BeautifulSoup(response2.text, 'lxml')
    name = html_soup2.find('title').text.replace('/', '-')  # album title as folder name
    os.mkdir(name)
    print(name)
    img1 = html_soup2.find("div", {"class": "content"}).find_all("img")
    page = html_soup2.find("div", {"id": "pages"}).find_all('a')[-2]
    max_page = page.get_text()  # the second-to-last pager link holds the last page number
    # Page 1 of the album.
    for pic in img1:
        os.chdir(name)
        count = count + 1
        pic1 = pic['src']
        pic2 = requests.get(pic1, headers=headers)
        f = open(name + str(count) + '.jpg', 'ab')  # binary ('b') mode is required for media files
        f.write(pic2.content)  # use .content (raw bytes), not .text, for media files
        f.close()
        os.chdir("C:\\Users\\Administrator\\Desktop\\jpg")  # hard-coded to the original author's desktop
    # Pages 2..max_page of the same album.
    for i in range(2, int(max_page) + 1):
        os.chdir(name)
        href3 = href2 + str(i) + '.html'
        response3 = requests.get(href3, headers=headers)
        html_soup3 = BeautifulSoup(response3.text, 'lxml')
        img1 = html_soup3.find("div", {"class": "content"}).find_all("img")
        for pic in img1:
            count = count + 1
            pic1 = pic['src']
            pic2 = requests.get(pic1, headers=headers)
            f = open(name + str(count) + '.jpg', 'ab')
            f.write(pic2.content)
            f.close()
        os.chdir("C:\\Users\\Administrator\\Desktop\\jpg")
print("完成!")