Scraping Lagou Job Data with Python and Visualizing It (Part 3)


import requests
import math
import time
import pandas as pd


def get_json(url, num):
    """
    Request the given url with the required headers and form data and
    return the JSON response for page number `num`.
    """
    # Visiting the search page first lets the session pick up the cookies
    # that Lagou's anti-crawler check expects on the Ajax request.
    url1 = 'https://www.lagou.com/jobs/list_python%E5%BC%80%E5%8F%91%E5%B7%A5%E7%A8%8B%E5%B8%88?labelWords=&fromSearch=true&suginput='
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
        'Host': 'www.lagou.com',
        'Referer': 'https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?labelWords=&fromSearch=true&suginput=',
        'X-Anit-Forge-Code': '0',
        'X-Anit-Forge-Token': 'None',
        'X-Requested-With': 'XMLHttpRequest'
    }
    data = {'first': 'true', 'pn': num, 'kd': 'python工程师'}
    s = requests.Session()
    print('Session created:', s, '\n\n')
    s.get(url=url1, headers=headers, timeout=3)
    cookie = s.cookies
    print('Cookies obtained:', cookie, '\n\n')
    res = requests.post(url, headers=headers, data=data, cookies=cookie, timeout=3)
    res.raise_for_status()
    res.encoding = 'utf-8'
    page_data = res.json()
    print('Response JSON:', page_data, '\n\n')
    return page_data


def get_page_num(count):
    """
    Work out how many pages to crawl. A keyword search on Lagou shows at
    most 30 pages, with at most 15 positions per page.
    """
    page_num = math.ceil(count / 15)
    if page_num > 30:
        return 30
    else:
        return page_num


def get_page_info(jobs_list):
    """
    Extract the fields of interest from every position on one page.
    """
    page_info_list = []
    for i in jobs_list:  # loop over all positions on the page
        job_info = []
        job_info.append(i['companyFullName'])
        job_info.append(i['companyShortName'])
        job_info.append(i['companySize'])
        job_info.append(i['financeStage'])
        job_info.append(i['district'])
        job_info.append(i['positionName'])
        job_info.append(i['workYear'])
        job_info.append(i['education'])
        job_info.append(i['salary'])
        job_info.append(i['positionAdvantage'])
        job_info.append(i['industryField'])
        job_info.append(i['firstType'])
        job_info.append(i['companyLabelList'])
        job_info.append(i['secondType'])
        job_info.append(i['city'])
        page_info_list.append(job_info)
    return page_info_list


def main():
    url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
    first_page = get_json(url, 1)
    total_page_count = first_page['content']['positionResult']['totalCount']
    num = get_page_num(total_page_count)
    total_info = []
    time.sleep(10)
    print('Total python development positions: {}, total pages: {}'.format(total_page_count, num))
    for num in range(1, num + 1):
        # Fetch the position data for each page
        page_data = get_json(url, num)  # JSON response for this page
        jobs_list = page_data['content']['positionResult']['result']  # all python positions on this page
        page_info = get_page_info(jobs_list)
        print('Positions on this page: %s' % page_info, '\n\n')
        total_info += page_info
        print('Crawled page {}, {} positions collected so far'.format(num, len(total_info)))
        time.sleep(20)
    # Convert the accumulated data into a DataFrame and write it out as a CSV file
    df = pd.DataFrame(data=total_info,
                      columns=['公司全名', '公司简称', '公司规模', '融资阶段', '区域', '职位名称', '工作经验',
                               '学历要求', '薪资', '职位福利', '经营范围', '职位类型', '公司福利',
                               '第二职位类型', '城市'])
    df.to_csv('Python_development_engineer.csv', index=False)
    print('Python position data saved')


if __name__ == '__main__':
    main()
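
Before moving on to the charts, it is worth confirming that the scrape actually produced usable data. The following is a minimal sanity check, assuming the script above ran to completion and wrote Python_development_engineer.csv into the working directory:

import pandas as pd

# Load the CSV written by the scraper and confirm the 15 columns are present.
df = pd.read_csv('Python_development_engineer.csv', encoding='utf-8')
print(df.shape)             # (number of positions scraped, 15)
print(df.columns.tolist())  # ['公司全名', '公司简称', ..., '城市']
print(df.head())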

2. Complete visualization code

The visualization uses matplotlib, jieba, wordcloud, pyecharts, pylab, scipy and a few other modules; readers can look up how each module is used, and the parameters involved, on their own.
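
As a quick illustration of one of the cleaning steps in the script below, here is the salary rule in isolation: the salary string is reduced to its numbers with a regular expression, and the estimate is the lower bound plus a quarter of the range. The '15k-25k' value is just a made-up sample:

import re

salary = '15k-25k'  # hypothetical sample value
low, high = [int(n) for n in re.findall(r'\d+', salary)]
print(low + (high - low) / 4)  # 15 + (25 - 15) / 4 = 17.5 (thousand yuan per month)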

import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from wordcloud import WordCloud
from scipy.misc import imread  # removed in newer SciPy releases
# from imageio import imread   # drop-in replacement on newer environments
import jieba
from pylab import mpl

# Configure matplotlib to display Chinese characters
mpl.rcParams['font.sans-serif'] = ['SimHei']  # set the default font
mpl.rcParams['axes.unicode_minus'] = False    # keep the minus sign from rendering as a box in saved figures

# Read the data
df = pd.read_csv('Python_development_engineer.csv', encoding='utf-8')

# Data cleaning: filter out internship positions
# df.drop(df[df['职位名称'].str.contains('实习')].index, inplace=True)
# print(df.describe())

# The experience column is stored as text, so use a regular expression to pull
# out the numbers first, then take the mean of the range
pattern = r'\d+'
# print(df['工作经验'], '\n\n\n')
# print(df['工作经验'].str.findall(pattern))
df['工作年限'] = df['工作经验'].str.findall(pattern)
print(type(df['工作年限']), '\n\n\n')

avg_work_year = []
count = 0
for i in df['工作年限']:
    # If the requirement is '不限' (no limit) or '应届毕业生' (fresh graduate), nothing matches, so use 0
    if len(i) == 0:
        avg_work_year.append(0)
        count += 1
    # If exactly one number matched, use it directly
    elif len(i) == 1:
        avg_work_year.append(int(''.join(i)))
        count += 1
    # If a range matched, take the average of its endpoints
    else:
        num_list = [int(j) for j in i]
        avg_year = sum(num_list) / 2
        avg_work_year.append(avg_year)
        count += 1
print(count)
df['avg_work_year'] = avg_work_year

# Parse the salary string into numbers; take the lower bound plus 25% of the
# range, which is closer to reality
df['salary'] = df['薪资'].str.findall(pattern)
avg_salary_list = []
for k in df['salary']:
    int_list = [int(n) for n in k]
    avg_salary = int_list[0] + (int_list[1] - int_list[0]) / 4
    avg_salary_list.append(avg_salary)
df['月薪'] = avg_salary_list
# df.to_csv('python.csv', index=False)

"""1. Plot and save a frequency histogram of python salaries"""
plt.hist(df['月薪'], bins=8, facecolor='#ff6700', edgecolor='blue')  # bins sets the number of bars
plt.xlabel('薪资(单位/千元)')
plt.ylabel('频数/频率')
plt.title('python薪资直方图')
plt.savefig('python薪资分布.jpg')
plt.show()

"""2. Plot and save a pie chart of the city distribution"""
city = df['城市'].value_counts()
print(type(city))
# print(len(city))
label = city.keys()
print(label)
city_list = []
count = 0
n = 1
distance = []
# Pull the slices beyond the five largest cities slightly out of the pie
for i in city:
    city_list.append(i)
    print('list length', len(city_list))
    count += 1
    if count > 5:
        n += 0.1
        distance.append(n)
    else:
        distance.append(0)
plt.pie(city_list, labels=label, labeldistance=1.2, autopct='%2.1f%%',
        pctdistance=0.6, shadow=True, explode=distance)
plt.axis('equal')  # draw the pie as a perfect circle
plt.legend(loc='upper left', bbox_to_anchor=(-0.1, 1))
plt.savefig('python地理位置分布图.jpg')
plt.show()

"""3. Word cloud of the job benefits"""
text = ''
for line in df['公司福利']:
    # Each cell stores a Python list as a string; eval converts it back to a list
    if len(eval(line)) == 0:
        continue
    else:
        for word in eval(line):
            text += word
cut_word = ','.join(jieba.cut(text))
word_background = imread('公主.jpg')  # mask image that defines the word cloud shape
cloud = WordCloud(
    font_path=r'C:\Windows\Fonts\simfang.ttf',
    background_color='black',
    mask=word_background,
    max_words=500,
    max_font_size=100,
    width=400,
    height=800
)
word_cloud = cloud.generate(cut_word)
word_cloud.to_file('福利待遇词云.png')
plt.imshow(word_cloud)
plt.axis('off')
plt.show()

"""4. Bar chart based on pyecharts"""
city = df['城市'].value_counts()
print(type(city))
print(city)
# print(len(city))
keys = city.index  # equivalent to keys = city.keys()
values = city.values
from pyecharts import Bar

bar = Bar("python职位的城市分布图")
bar.add("城市", keys, values)
bar.print_echarts_options()  # only prints the chart options, useful for debugging
bar.render(path='a.html')
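
One version note: `from pyecharts import Bar` is the pre-1.0 pyecharts API, so the last block only runs on pyecharts 0.x. On pyecharts 1.x and later the chart classes moved to pyecharts.charts and options are passed explicitly. A rough, untested sketch of the same bar chart under the newer API would be:

from pyecharts import options as opts
from pyecharts.charts import Bar

# keys / values come from df['城市'].value_counts() as in the script above
bar = (
    Bar()
    .add_xaxis(list(keys))
    .add_yaxis('城市', [int(v) for v in values])
    .set_global_opts(title_opts=opts.TitleOpts(title='python职位的城市分布图'))
)
bar.render('a.html')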

