Use a web crawler to batch-download songs from NetEase Cloud Music playlists. The script relies on requests and BeautifulSoup (with the lxml parser): it first scrapes the hot-playlist pages for playlist titles and ids, then extracts each playlist's song names and playable URLs.
import requests
from bs4 import BeautifulSoup


class GetMusic():
    def __init__(self):
        # Request headers that mimic a normal browser visit to music.163.com
        self.headers = {
            'Referer': 'http://music.163.com/',
            'Host': 'music.163.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        }
        self.session = requests.session()

    def getmusic(self, page):
        # One page of hot playlists; each page holds 35 playlists
        playlist = 'https://music.163.com/discover/playlist?order=hot&cat=%s&limit=35&offset=%s' % ('全部', int(page) * 35)
        response = self.session.get(playlist, headers=self.headers).content
        musl = BeautifulSoup(response, 'lxml')  # parse the page
        music_list = []
        # Each playlist cover element carries the title and the playlist id
        musics = musl.find_all(class_='u-cover u-cover-1')
        for music in musics:
            li = []
            li.append(music.a['title'])            # playlist title
            li.append(music.div.a['data-res-id'])  # playlist id
            music_list.append(li)
        return music_list

    def loadmusic(self, music_id):
        # URL of a single playlist
        play_url = 'http://music.163.com/playlist?id=%s' % music_id
        # Fetch the playlist page
        response = self.session.get(play_url, headers=self.headers).content
        # Use bs4 to pick out the song names and addresses
        s = BeautifulSoup(response, 'lxml')  # parse the page
        musics = s.find('ul', {'class': 'f-hide'})
        lists = []
        for music in musics.find_all('a'):
            song = []
            # music['href'] looks like '/song?id=xxxx'; drop the leading '/song'
            # and build the outer media URL that redirects to the mp3 file
            musicUrl = 'http://music.163.com/song/media/outer/url' + music['href'][5:] + '.mp3'
            musicName = music.text
            # Name and URL of a single song go into one list
            song.append(musicName)
            song.append(musicUrl)
            # All songs of the playlist are collected in lists
            lists.append(song)
        return lists
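
The class above only collects playlist ids, song names, and song URLs; a small driver is still needed to actually save the mp3 files. Below is a minimal sketch of such a driver. The 'downloads' directory, crawling only page 0, and the file-naming scheme are assumptions rather than part of the original script, and the outer/url redirect only yields audio for songs that are not copyright-restricted.

import os

if __name__ == '__main__':
    crawler = GetMusic()
    # Assumed output directory for the downloaded mp3 files
    os.makedirs('downloads', exist_ok=True)
    # Crawl only the first page of hot playlists (page index assumed to start at 0)
    for playlist_title, playlist_id in crawler.getmusic(0):
        for song_name, song_url in crawler.loadmusic(playlist_id):
            # outer/url redirects to the real mp3; stream the body to disk
            resp = crawler.session.get(song_url, headers=crawler.headers, stream=True)
            if resp.status_code != 200:
                continue
            # Strip characters that are not allowed in file names
            safe_name = ''.join(c for c in song_name if c not in '\\/:*?"<>|')
            path = os.path.join('downloads', safe_name + '.mp3')
            with open(path, 'wb') as f:
                for chunk in resp.iter_content(chunk_size=8192):
                    f.write(chunk)
            print('Saved', path)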