python.requests实战《百度聘全站》
一,先看结果
文章图片
image.png
【python.requests实战《百度聘全站》】
二,思路
1.找到页面的所有职位,迭代后进行请求
2.找到职位所对应的url链接,迭代后,合并职位进行请求抓取
3.实现手动选择职位,城市
4.实现一键入库筛查,排选
三,上代码
import requests
import json
import pymysql
import time
from bs4 import BeautifulSoup
import re


class Baidujob(object):
    """Crawler for Baidu Zhaopin (zhaopin.baidu.com) full-time job listings.

    ``main`` scrapes the landing page for the job-category and city lists,
    then calls :meth:`get_url` for every (post, city, page) combination,
    which pages through the site's JSON API and prints each listing.
    ``mysql`` optionally creates a MySQL table for storing results.
    """

    # Request headers shared by both HTTP calls (the scraped source
    # duplicated this dict in get_url and main).  The Cookie values are
    # session-specific and will expire -- refresh them from a real browser
    # session before running.
    _HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36',
        'Referer': 'http://zhaopin.baidu.com/quanzhi?tid=4139&ie=utf8&oe=utf8'
                   '&query=python&city_sug=%E5%B9%BF%E5%B7%9E',
        'Host': 'zhaopin.baidu.com',
        'Cookie': 'Hm_lvt_dc173081ad0848b7d3e412373bb02119=1493714008; '
                  'PSTM=1500109449; '
                  'BIDUPSID=EBA58E2B59F5D325007E6FA067243233; '
                  'PRY=1; '
                  'BAIDUID=57829DB7D914A3AB376B0A2A7415FD8C:FG=1; '
                  'Hm_lvt_da3258e243c3132f66f0f3c247b48473=1509328256; '
                  'Hm_lvt_24117ca0ed302abec8cd5b93e02d18cd=1509858595; '
                  'BDUSS=ltQWtHcEYxcEt6eEVkdzBUemo0R1dPZHJxdE9LOE5EbXhYaHpQ'
                  'MWJTVEUta0ZhQVFBQUFBJCQAAAAAAAAAAAEAAABIwC9~eWFuZ2Z1bG9u'
                  'Z2hvbWUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
                  'AAAAAAAAAAAAAAAAAMRtGlrEbRpaOF; '
                  'MCITY=-%3A; '
                  'BDRCVFR[VIIOqqdxwZ_]=mk3SLVN4HKm; '
                  'PSINO=7; '
                  'H_PS_PSSID=1460_21111_22075; '
                  'URLTITLESALARY=%20; '
                  'Hm_lvt_c676f95eebbd4fa8a59418f48090ac4d='
                  '1513303313,1514278789,1514336165,1515219239; '
                  'Hm_lpvt_c676f95eebbd4fa8a59418f48090ac4d=1515219701',
    }

    def __init__(self):
        pass

    def mysql(self):
        """Interactively create a results table.

        Prompts for a table name on stdin and issues a
        ``CREATE TABLE IF NOT EXISTS``.  The connection parameters were
        placeholders ('x') in the scraped source and must be replaced with
        real credentials before use.
        """
        # TODO: fill in real connection credentials (the article redacted
        # them to 'x'); port must be an int, charset a valid MySQL charset.
        conn = pymysql.Connect(host='x', user='x', password='x',
                               database='x', port=3306, charset='utf8')
        cursor = conn.cursor()
        # The table name cannot be bound as a SQL parameter, hence the
        # %-substitution below.  NOTE(review): dbname comes from user input
        # and is interpolated unescaped -- acceptable for a local tool only.
        sql = ("CREATE TABLE IF NOT EXISTS %s("
               "ID INT(10) NOT NULL PRIMARY KEY AUTO_INCREMENT,"
               "A VARCHAR(255),"
               "B VARCHAR(255),"
               "C VARCHAR(255))")
        dbname = input('请输入数据库名:')
        cursor.execute(sql % dbname)
        print('创建数据库%s成功!' % dbname)

    def get_url(self, post, page, city):
        """Fetch one API page (20 rows) of *post* listings in *city* and print them.

        :param post: job keyword, e.g. 'python'
        :param page: zero-based row offset (``pn``), stepped in 20s
        :param city: city name, sent as ``city_sug``
        :returns: None; on HTTP/parse errors it logs and returns None
        """
        url = ('http://zhaopin.baidu.com/api/quanzhiasync'
               '?query={}&sort_type=1&city={}&detailmode=close&rn=20&pn={}'
               .format(post, city, page))
        # pn steps 0, 20, 40, ... (20 rows per page).
        data = {
            'pn': str(page),
            'rn': '20',
            'detailmode': 'close',
            'city_sug': city,
            'sort_type': '1',
            'query': post,
        }
        try:
            response = requests.get(url, headers=self._HEADERS,
                                    params=data).text
            json_dict = json.loads(response)
            disp_data = json_dict['data']['main']['data'].get('disp_data')
            for i, items in enumerate(disp_data, start=1):
                # Part A: descriptive fields of the listing.
                city_name = items.get('city')
                price = items.get('salary')
                title = items.get('title')
                buty = items.get('description_jd')
                terrace = items.get('source')
                times = items.get('lastmod')
                company_data = (items.get('officialname'),
                                ':,岗位:{},城市:{},工资:{}\n,{}平台:{},{}\n'.format(
                                    title, city_name, price.split('-')[0],
                                    terrace, buty, times))
                # Part B: contact fields of the listing.
                phone1 = items.get('@cts')
                phone2 = items.get('@dts')
                email = items.get('email')
                companyb = (items.get('officialname'),
                            '电话:{},电话:{},EMail:{}'.format(phone1, phone2,
                                                            email))
                # Flatten the (name, details) tuples to plain strings so
                # they can later be stored in MySQL.
                company_str1 = ''.join(company_data)
                company_str2_phone = ''.join(companyb)
                print(i)
                print(company_str1, company_str2_phone)
                # Database insert (disabled in the original article):
                # sql = "INSERT INTO %s(A,B)VALUES('%s','%s')"
                # cursor.execute(sql % (dbname, company_str1,
                #                       company_str2_phone))
                # conn.commit()
                # time.sleep(0.1)
        except (requests.RequestException, ValueError, KeyError,
                TypeError, AttributeError):
            # Narrowed from the original bare ``except:``: covers network
            # failures, malformed JSON, missing keys, and None fields.
            print('response error!!!')
            return None

    def main(self, post='python'):
        """Scrape the city and job-category index, then crawl every
        (city, page) combination for *post* via :meth:`get_url`.

        :param post: job keyword to query; has a default so existing
            ``main()`` callers (which relied on a module global) still work.
        """
        urls = ('http://zhaopin.baidu.com/quanzhi?tid=4139&ie=utf8&oe=utf8'
                '&query=%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90'
                '&city_sug=%E5%B9%BF%E5%B7%9E')
        try:
            # Bug fix: headers must be passed as a keyword argument -- the
            # original requests.get(urls, headers) sent the header dict as
            # the ``params`` query string instead.
            response = requests.get(urls, headers=self._HEADERS).text
            soup = BeautifulSoup(response, 'lxml')
            item = soup.find_all('div', attrs={'class': re.compile('all-jobs')})
            city = soup.find_all('div', attrs={'class': 'tabs-body'})
            for citys in city:
                for g in citys.find_all('dd'):
                    city_name = g.get_text()  # city name, e.g. 广州
                    for items in item:
                        for b in items.find_all('a'):
                            profession = b.get_text()  # job-category label
                            # page is the row offset (pn), 20 rows per page.
                            for page in range(0, 2000, 20):
                                self.get_url(post, city=city_name, page=page)
                                print('城市:%s,行业%s,页码%s'
                                      % (city_name, profession, page))
        except Exception:
            # Top-level crawl boundary; the original used a bare except.
            print('main,error!!!')


if __name__ == '__main__':
    post = input('请输入职位:')
    l = Baidujob()
    l.main(post)
    # Single-query variant (disabled in the original article):
    # post = 'Python'
    # city = '天津'
    # for page in range(0, 100, 20):
    #     get_url(post, page, city)
    #     print(page)
    # Database teardown (disabled):
    # cursor.close()
    # conn.close()
    print('CLOSE DATABASE OK!!!')
推荐阅读
- 慢慢的美丽
- 《真与假的困惑》???|《真与假的困惑》??? ——致良知是一种伟大的力量
- 《跨界歌手》:亲情永远比爱情更有泪点
- 诗歌:|诗歌: 《让我们举起世界杯,干了!》
- 期刊|期刊 | 国内核心期刊之(北大核心)
- 《魔法科高中的劣等生》第26卷(Invasion篇)发售
- 人间词话的智慧
- 《一代诗人》37期,生活,江南j,拨动心潭的一泓秋水
- 广角叙述|广角叙述 展众生群像——试析鲁迅《示众》的展示艺术
- 书评——《小行星》