@@ -2,6 +2,7 @@ import json
 import re
 from .utils import get_page
 from pyquery import PyQuery as pq
+from bs4 import BeautifulSoup as bsp
 
 
 class ProxyMetaclass(type):
@@ -84,11 +85,11 @@ class Crawler(object, metaclass=ProxyMetaclass):
     def crawl_ip181(self):
         start_url = 'http://www.ip181.com/'
         html = get_page(start_url)
-        ip_adress = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
+        ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
         # \s* matches whitespace, letting the pattern span line breaks
-        re_ip_adress = ip_adress.findall(html)
-        for adress,port in re_ip_adress:
-            result = adress + ':' + port
+        re_ip_address = ip_address.findall(html)
+        for address,port in re_ip_address:
+            result = address + ':' + port
             yield result.replace(' ', '')
 
 
@@ -96,34 +97,23 @@ class Crawler(object, metaclass=ProxyMetaclass):
         for page in range(1, 4):
             start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
             html = get_page(start_url)
-            ip_adress = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
+            ip_address = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
             # \s* matches whitespace, letting the pattern span line breaks
-            re_ip_adress = ip_adress.findall(html)
-            for adress, port in re_ip_adress:
-                result = adress+':'+ port
+            re_ip_address = ip_address.findall(html)
+            for address, port in re_ip_address:
+                result = address+':'+ port
                 yield result.replace(' ', '')
 
 
-    def crawl_data5u(self):
-        for i in ['gngn', 'gnpt']:
-            start_url = 'http://www.data5u.com/free/{}/index.shtml'.format(i)
-            html = get_page(start_url)
-            ip_adress = re.compile(' <ul class="l2">\s*<span><li>(.*?)</li></span>\s*<span style="width: 100px;"><li class=".*">(.*?)</li></span>')
-            # \s* matches whitespace, letting the pattern span line breaks
-            re_ip_adress = ip_adress.findall(html)
-            for adress, port in re_ip_adress:
-                result = adress+':'+port
-                yield result.replace(' ','')
-
     def crawl_kxdaili(self):
-        for i in range(1, 4):
+        for i in range(1, 11):
             start_url = 'http://www.kxdaili.com/ipList/{}.html#ip'.format(i)
             html = get_page(start_url)
-            ip_adress = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
+            ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
             # \s* matches whitespace, letting the pattern span line breaks
-            re_ip_adress = ip_adress.findall(html)
-            for adress, port in re_ip_adress:
-                result = adress + ':' + port
+            re_ip_address = ip_address.findall(html)
+            for address, port in re_ip_address:
+                result = address + ':' + port
                 yield result.replace(' ', '')
 
 
@@ -132,36 +122,36 @@ class Crawler(object, metaclass=ProxyMetaclass):
             start_url = 'https://premproxy.com/proxy-by-country/{}.htm'.format(i)
             html = get_page(start_url)
             if html:
-                ip_adress = re.compile('<td data-label="IP:port ">(.*?)</td>')
-                re_ip_adress = ip_adress.findall(html)
-                for adress_port in re_ip_adress:
-                    yield adress_port.replace(' ','')
+                ip_address = re.compile('<td data-label="IP:port ">(.*?)</td>')
+                re_ip_address = ip_address.findall(html)
+                for address_port in re_ip_address:
+                    yield address_port.replace(' ','')
 
     def crawl_xroxy(self):
         for i in ['CN','TW']:
             start_url = 'http://www.xroxy.com/proxylist.php?country={}'.format(i)
             html = get_page(start_url)
             if html:
-                ip_adress1 = re.compile("title='View this Proxy details'>\s*(.*).*")
-                re_ip_adress1 = ip_adress1.findall(html)
-                ip_adress2 = re.compile("title='Select proxies with port number .*'>(.*)</a>")
-                re_ip_adress2 = ip_adress2.findall(html)
-                for adress,port in zip(re_ip_adress1,re_ip_adress2):
-                    adress_port = adress+':'+port
-                    yield adress_port.replace(' ','')
+                ip_address1 = re.compile("title='View this Proxy details'>\s*(.*).*")
+                re_ip_address1 = ip_address1.findall(html)
+                ip_address2 = re.compile("title='Select proxies with port number .*'>(.*)</a>")
+                re_ip_address2 = ip_address2.findall(html)
+                for address,port in zip(re_ip_address1,re_ip_address2):
+                    address_port = address+':'+port
+                    yield address_port.replace(' ','')
 
     def crawl_kuaidaili(self):
         for i in range(1, 4):
             start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
             html = get_page(start_url)
             if html:
-                ip_adress = re.compile('<td data-title="IP">(.*?)</td>')
-                re_ip_adress = ip_adress.findall(html)
+                ip_address = re.compile('<td data-title="IP">(.*?)</td>')
+                re_ip_address = ip_address.findall(html)
                 port = re.compile('<td data-title="PORT">(.*?)</td>')
                 re_port = port.findall(html)
-                for adress,port in zip(re_ip_adress, re_port):
-                    adress_port = adress+':'+port
-                    yield adress_port.replace(' ','')
+                for address,port in zip(re_ip_address, re_port):
+                    address_port = address+':'+port
+                    yield address_port.replace(' ','')
 
     def crawl_xicidaili(self):
         for i in range(1, 3):
@@ -179,12 +169,12 @@ class Crawler(object, metaclass=ProxyMetaclass):
                 trs = find_trs.findall(html)
                 for tr in trs:
                     find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
-                    re_ip_adress = find_ip.findall(tr)
+                    re_ip_address = find_ip.findall(tr)
                     find_port = re.compile('<td>(\d+)</td>')
                     re_port = find_port.findall(tr)
-                    for adress,port in zip(re_ip_adress, re_port):
-                        adress_port = adress+':'+port
-                        yield adress_port.replace(' ','')
+                    for address,port in zip(re_ip_address, re_port):
+                        address_port = address+':'+port
+                        yield address_port.replace(' ','')
 
     def crawl_ip3366(self):
         for i in range(1, 4):
@@ -195,12 +185,12 @@ class Crawler(object, metaclass=ProxyMetaclass):
                 trs = find_tr.findall(html)
                 for s in range(1, len(trs)):
                     find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
-                    re_ip_adress = find_ip.findall(trs[s])
+                    re_ip_address = find_ip.findall(trs[s])
                     find_port = re.compile('<td>(\d+)</td>')
                     re_port = find_port.findall(trs[s])
-                    for adress,port in zip(re_ip_adress, re_port):
-                        adress_port = adress+':'+port
-                        yield adress_port.replace(' ','')
+                    for address,port in zip(re_ip_address, re_port):
+                        address_port = address+':'+port
+                        yield address_port.replace(' ','')
 
     def crawl_iphai(self):
         start_url = 'http://www.iphai.com/'
@@ -211,9 +201,57 @@ class Crawler(object, metaclass=ProxyMetaclass):
             print(len(trs))
             for s in range(1, len(trs)):
                 find_ip = re.compile('<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
-                re_ip_adress = find_ip.findall(trs[s])
+                re_ip_address = find_ip.findall(trs[s])
                 find_port = re.compile('<td>\s+(\d+)\s+</td>', re.S)
                 re_port = find_port.findall(trs[s])
-                for adress,port in zip(re_ip_adress, re_port):
-                    adress_port = adress+':'+port
-                    yield adress_port.replace(' ','')
+                for address,port in zip(re_ip_address, re_port):
+                    address_port = address+':'+port
+                    yield address_port.replace(' ','')
+
+    def crawl_89ip(self):
+        start_url = 'http://www.89ip.cn/apijk/?&tqsl=1000&sxa=&sxb=&tta=&ports=&ktip=&cf=1'
+        html = get_page(start_url)
+        if html:
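+            # the endpoint is assumed to return a plain-text list of
+            # IP:port entries, so a single regex pass over the body suffices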
+            find_ips = re.compile('(\d+\.\d+\.\d+\.\d+:\d+)', re.S)
+            ip_ports = find_ips.findall(html)
+            for address_port in ip_ports:
+                yield address_port
+
+    def crawl_data5u(self):
+        start_url = 'http://www.data5u.com/free/gngn/index.shtml'
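+        # a browser-like header set (including the session Cookie) appears to
+        # be needed for data5u to serve the page; adjust if requests fail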
+        headers = {
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+            'Accept-Encoding': 'gzip, deflate',
+            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
+            'Cache-Control': 'max-age=0',
+            'Connection': 'keep-alive',
+            'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
+            'Host': 'www.data5u.com',
+            'Referer': 'http://www.data5u.com/free/index.shtml',
+            'Upgrade-Insecure-Requests': '1',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
+        }
+        html = get_page(start_url, options=headers)
+        if html:
+            # ip_address = re.compile('<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class=\"port HCAAA\">(\d.*?)</li>', re.S)
+            # re_ip_address = ip_address.findall(html)
+            # for address, port in re_ip_address:
+            #     result = address + ':' + port
+            #     yield result.replace(' ', '')
+            soup = bsp(html, 'lxml')
+            ul_l2s = soup.find_all('ul', class_='l2')
+            for l2 in ul_l2s:
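+                # each <ul class="l2"> row is assumed to carry the IP in its
+                # first <span><li> cell and the port in the <li class="port ..."> cell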
+                address = l2.span.li.text
+                port = l2.find('li', class_='port').text
+                print(address, port)
+                result = address.strip() + ':' + port.strip()
+                yield result
+
+
+
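
Reviewer note (not part of the patch): below is a minimal, self-contained
sketch of what the new BeautifulSoup extraction in crawl_data5u does, run
against a canned row. The HTML snippet is a hypothetical sample of data5u's
markup; html.parser is used here so the non-standard <span><li> nesting
survives verbatim (the patch itself parses with lxml, which may normalize it).

    from bs4 import BeautifulSoup as bsp

    # hypothetical sample of one data5u result row; live markup may differ
    SAMPLE = '''
    <ul class="l2">
      <span><li>1.2.3.4</li></span>
      <span style="width: 100px;"><li class="port GEA">8080</li></span>
    </ul>
    '''

    soup = bsp(SAMPLE, 'html.parser')
    for l2 in soup.find_all('ul', class_='l2'):
        address = l2.span.li.text                    # first <span><li> holds the IP
        port = l2.find('li', class_='port').text     # port cell, matched by its class
        print(address.strip() + ':' + port.strip())  # -> 1.2.3.4:8080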