Python 爬虫实例(14) 爬取百度音乐
# -*- coding: utf-8 -*-
"""Crawler for Baidu Music (music.baidu.com) tag listing pages.

For every tag in SONG_TYPES it walks the paginated listing (51 pages of
20 songs each), parses each ``div.song-item`` row, and inserts one record
per song into the ``baidu_music`` table via the project helper
``insert_data`` (from ``common.contest``).

NOTE(review): the published source was mangled by the blog platform —
newlines were removed and every HTML tag inside the regex patterns was
stripped, so the exact original extraction patterns are unrecoverable.
This reconstruction replaces the broken regex-on-str(div) extraction with
equivalent BeautifulSoup selectors; confirm the selectors against the
live page markup before relying on the output.
"""
from urllib.parse import quote

import requests
from bs4 import BeautifulSoup

# Project-local helper: insert_data(dbName=..., data_dict=...) writes one
# row into the named table.  Imported explicitly instead of the original
# ``from common.contest import *``.
from common.contest import insert_data

# Tag categories crawled, exactly as in the original script.
SONG_TYPES = [
    '新歌', '热歌', '中国好声音', '经典老歌', '电视剧', '广场舞', '欧美',
    '轻音乐', 'DJ 舞曲', '80后', '网络歌曲', '劲爆', '儿歌', '纯音乐',
    '粤语', '民歌', '钢琴曲', '萨克斯', '古典音乐',
]

PAGE_SIZE = 20   # songs per listing page (original: size=20, start = i * 20)
MAX_PAGES = 51   # original: for i in range(0, 51)


def _make_headers(referer):
    """Build the browser-mimicking request headers used by the original.

    The original also carried a hard-coded session Cookie, but it was
    commented out in the published source, so it is omitted here.
    """
    return {
        "Host": "music.baidu.com",
        "Connection": "keep-alive",
        "Cache-Control": "max-age=0",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml; "
                  "q=0.9,image/webp,image/apng,*/*; q=0.8",
        "Referer": referer,
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh; q=0.9",
    }


def _parse_song_item(item):
    """Extract one song record from a ``div.song-item`` element.

    Returns a dict with the same keys the original inserted:
    author_url, song_url, author_list, appendix, index_num, song_name.
    Missing fields default to "" — mirroring the original's behaviour of
    storing an empty string when a fragment was absent.

    NOTE(review): selectors reconstructed from the field names in the
    original (index_num, appendix, author_list) — verify against the page.
    """
    index_span = item.find('span', class_='index')
    index_num = index_span.get_text(strip=True) if index_span else ''

    # Original pattern matched: href="..." target="_blank" title="...">name
    song_url = ''
    song_name = ''
    title_link = item.find('a', attrs={'target': '_blank', 'title': True})
    if title_link is not None:
        song_url = 'http://music.baidu.com/' + title_link.get('href', '').lstrip('/')
        song_name = title_link.get_text(strip=True)

    appendix_span = item.find('span', class_='appendix')
    appendix = appendix_span.get_text(strip=True) if appendix_span else ''

    author_span = item.find('span', class_='author_list')
    author_list = ''
    author_url = ''
    if author_span is not None:
        author_list = author_span.get('title', '') or author_span.get_text(strip=True)
        author_link = author_span.find('a', href=True)
        if author_link is not None:
            author_url = 'http://music.baidu.com/' + author_link['href'].lstrip('/')

    return {
        "author_url": author_url,
        "song_url": song_url,
        "author_list": author_list,
        "appendix": appendix,
        "index_num": index_num,
        "song_name": song_name,
    }


def spider():
    """Crawl every tag's listing pages and insert each song into MySQL."""
    # One reused Session (the original created requests.session() per page,
    # discarding keep-alive connections each time).
    session = requests.session()
    for song_type in SONG_TYPES:
        encoded_type = quote(song_type)  # original used urllib.quote (Py2)
        for page in range(MAX_PAGES):
            start = page * PAGE_SIZE
            url = ('http://music.baidu.com/tag/' + encoded_type
                   + '?size=' + str(PAGE_SIZE)
                   + '&start=' + str(start)
                   + '&third_type=0')
            print(url)
            result = session.get(url=url, headers=_make_headers(url))
            if result.status_code != 200:
                continue
            soup = BeautifulSoup(result.content, 'html.parser')
            # Trailing space in the class string is intentional — it is the
            # exact attribute value matched by the original.
            items = soup.find_all('div', attrs={"class": "song-item clearfix "})
            print(len(items))
            for item in items:
                data_dict = _parse_song_item(item)
                insert_data(dbName="baidu_music", data_dict=data_dict)
            print("=" * 88)


if __name__ == '__main__':
    spider()
【Python 爬虫实例(14) 爬取百度音乐】转载于:https://www.cnblogs.com/xuchunlin/p/9034072.html
推荐阅读
- python学习之 实现QQ自动发送消息
- 逻辑回归的理解与python示例
- python自定义封装带颜色的logging模块
- 【Leetcode/Python】001-Two Sum
- Python基础 - 练习1
- Python爬虫 --- 1.4 正则表达式(re库)
- Python(pathlib模块)
- python青少年编程比赛_第十一届蓝桥杯大赛青少年创意编程组比赛细则
- Python数据分析(一)(Matplotlib使用)
- 爬虫数据处理HTML转义字符