@@ -23,15 +23,7 @@ class Crawler(object, metaclass=ProxyMetaclass):
             print('Successfully got proxy', proxy)
             proxies.append(proxy)
         return proxies
-
-    # def crawl_daxiang(self):
-    #     url = 'http://vtp.daxiangdaili.com/ip/?tid=559363191592228&num=50&filter=on'
-    #     html = get_page(url)
-    #     if html:
-    #         urls = html.split('\n')
-    #         for url in urls:
-    #             yield url
-
+
     def crawl_daili66(self, page_count=4):
         """
         Get proxies from Daili66
@@ -51,47 +43,6 @@ class Crawler(object, metaclass=ProxyMetaclass):
                     port = tr.find('td:nth-child(2)').text()
                     yield ':'.join([ip, port])

-    def crawl_proxy360(self):
-        """
-        Get proxies from Proxy360
-        :return: proxy
-        """
-        start_url = 'http://www.proxy360.cn/Region/China'
-        print('Crawling', start_url)
-        html = get_page(start_url)
-        if html:
-            doc = pq(html)
-            lines = doc('div[name="list_proxy_ip"]').items()
-            for line in lines:
-                ip = line.find('.tbBottomLine:nth-child(1)').text()
-                port = line.find('.tbBottomLine:nth-child(2)').text()
-                yield ':'.join([ip, port])
-
-    def crawl_goubanjia(self):
-        """
-        Get proxies from Goubanjia
-        :return: proxy
-        """
-        start_url = 'http://www.goubanjia.com/free/gngn/index.shtml'
-        html = get_page(start_url)
-        if html:
-            doc = pq(html)
-            tds = doc('td.ip').items()
-            for td in tds:
-                td.find('p').remove()
-                yield td.text().replace(' ', '')
-
-    def crawl_ip181(self):
-        start_url = 'http://www.ip181.com/'
-        html = get_page(start_url)
-        ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
-        # \s* matches whitespace, including line breaks between tags
-        re_ip_address = ip_address.findall(html)
-        for address,port in re_ip_address:
-            result = address + ':' + port
-            yield result.replace(' ', '')
-
-
     def crawl_ip3366(self):
         for page in range(1, 4):
             start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
@@ -102,42 +53,6 @@ class Crawler(object, metaclass=ProxyMetaclass):
             for address, port in re_ip_address:
                 result = address+':'+ port
                 yield result.replace(' ', '')
-
-
-    def crawl_kxdaili(self):
-        for i in range(1, 11):
-            start_url = 'http://www.kxdaili.com/ipList/{}.html#ip'.format(i)
-            html = get_page(start_url)
-            ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
-            # \s* matches whitespace, including line breaks between tags
-            re_ip_address = ip_address.findall(html)
-            for address, port in re_ip_address:
-                result = address + ':' + port
-                yield result.replace(' ', '')
-
-
-    def crawl_premproxy(self):
-        for i in ['China-01','China-02','China-03','China-04','Taiwan-01']:
-            start_url = 'https://premproxy.com/proxy-by-country/{}.htm'.format(i)
-            html = get_page(start_url)
-            if html:
-                ip_address = re.compile('<td data-label="IP:port ">(.*?)</td>')
-                re_ip_address = ip_address.findall(html)
-                for address_port in re_ip_address:
-                    yield address_port.replace(' ','')
-
-    def crawl_xroxy(self):
-        for i in ['CN','TW']:
-            start_url = 'http://www.xroxy.com/proxylist.php?country={}'.format(i)
-            html = get_page(start_url)
-            if html:
-                ip_address1 = re.compile("title='View this Proxy details'>\s*(.*).*")
-                re_ip_address1 = ip_address1.findall(html)
-                ip_address2 = re.compile("title='Select proxies with port number .*'>(.*)</a>")
-                re_ip_address2 = ip_address2.findall(html)
-                for address,port in zip(re_ip_address1,re_ip_address2):
-                    address_port = address+':'+port
-                    yield address_port.replace(' ','')

     def crawl_kuaidaili(self):
         for i in range(1, 4):
@@ -206,15 +121,6 @@ class Crawler(object, metaclass=ProxyMetaclass):
                     address_port = address+':'+port
                     yield address_port.replace(' ','')

-    def crawl_89ip(self):
-        start_url = 'http://www.89ip.cn/apijk/?&tqsl=1000&sxa=&sxb=&tta=&ports=&ktip=&cf=1'
-        html = get_page(start_url)
-        if html:
-            find_ips = re.compile('(\d+\.\d+\.\d+\.\d+:\d+)', re.S)
-            ip_ports = find_ips.findall(html)
-            for address_port in ip_ports:
-                yield address_port
-
     def crawl_data5u(self):
         start_url = 'http://www.data5u.com/free/gngn/index.shtml'
         headers = {