无意间习得独孤九剑钓鱼式,帮各位知友承包鱼塘!
(如果你是男生,我教你另外一套拳,水果妹的水果拳,….)
改动下面的问题编号,即可获取相应的问题下的图片,记得有两处。
eg:35846840
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Download all answer images from one Zhihu question (Python 2 script).

Fetches the question page to learn the answer count, then pages through the
QuestionAnswerListV2 endpoint, extracting every large-image URL (the ``_b``
variants) from the returned HTML fragments and saving each file under
``images/``.
"""
from urlparse import urlsplit
from os.path import basename
import urllib2
import re
import os
import json

import requests

# The question to scrape. Changing this single constant updates both the
# question-page URL and the API payload (previously it had to be edited in
# two separate places).
QUESTION_ID = 35846840
url = 'https://www.zhihu.com/question/%d' % QUESTION_ID

if not os.path.exists('images'):
    os.mkdir("images")

page_size = 50   # answers requested per API call
offset = 0       # paging cursor into the answer list

# The question page embeds the total answer count in an <h3 data-num="..."> tag.
url_content = urllib2.urlopen(url).read()
answers = re.findall('h3 data-num="(.*?)"', url_content)
limits = int(answers[0])

while offset < limits:
    post_url = "http://www.zhihu.com/node/QuestionAnswerListV2"
    # The endpoint expects its arguments as a JSON string in the `params` field.
    params = json.dumps({
        'url_token': QUESTION_ID,
        'pagesize': page_size,
        'offset': offset
    })
    data = {
        '_xsrf': '',        # NOTE(review): empty token appears to be accepted here
        'method': 'next',
        'params': params
    }
    header = {
        'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0",
        'Host': "www.zhihu.com",
        'Referer': url
    }
    response = requests.post(post_url, data=data, headers=header)
    # `msg` is a list of HTML fragments, one per answer.
    answer_list = response.json()["msg"]
    # Keep only the large-size image variants (URLs containing "_b").
    img_urls = re.findall('img .*?src="(.*?_b.*?)"', ''.join(answer_list))
    for img_url in img_urls:
        try:
            img_data = urllib2.urlopen(img_url).read()
            file_name = basename(urlsplit(img_url)[2])
            # Context manager guarantees the file is closed even on a failed write.
            with open('images/' + file_name, 'wb') as output:
                output.write(img_data)
        except (urllib2.URLError, IOError):
            # Best-effort download: skip images that fail, but no longer
            # swallow unrelated exceptions (e.g. KeyboardInterrupt) with a
            # bare `except`.
            continue
    offset += page_size
comment:知乎上习得钓鱼式 。