I am teaching myself Python and decided to build a simple web-crawler engine. The code is below:
def find_next_url(page):
    # Locate the next anchor tag; return (None, 0) when there are no more links.
    start_of_url_line = page.find('<a href')
    if start_of_url_line == -1:
        return None, 0
    # Find the opening quote of an absolute http(s) URL inside the tag.
    start_of_url = page.find('"http', start_of_url_line)
    if start_of_url == -1:
        return None, 0
    # The URL ends at the next double quote.
    end_of_url = page.find('"', start_of_url + 1)
    one_url = page[start_of_url + 1 : end_of_url]
    return one_url, end_of_url
def get_all_url(page):
    # Collect every URL on the page by repeatedly chopping off the text
    # up to the end of the last URL found.
    p = []
    while True:
        url, end_pos = find_next_url(page)
        if url:
            p.append(url)
            page = page[end_pos + 1 :]
        else:
            break
    return p
def union(a, b):
    # Append to a every element of b that is not already in it.
    for e in b:
        if e not in a:
            a.append(e)
    return a
import urllib.request

def webcrawl(seed):
    tocrawl = [seed]
    crawled = []
    while tocrawl:  # stop once there is nothing left to crawl
        page = tocrawl.pop()
        if page not in crawled:
            # This is the call that raises the HTTP 403 error.
            intpage = urllib.request.urlopen(page).read()
            # Decode the bytes instead of str(), which would give a b'...' repr.
            openpage = intpage.decode('utf-8', errors='replace')
            union(tocrawl, get_all_url(openpage))
            crawled.append(page)
    return crawled
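
The URL-extraction part seems to work when I test get_all_url by itself on a small snippet:

sample = '<p><a href="http://example.com">one</a> <a href="http://example.org/page">two</a></p>'
print(get_all_url(sample))
# prints ['http://example.com', 'http://example.org/page']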
However, I always get an HTTP 403 error as soon as the crawler actually fetches a page.
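
From what I have read, a 403 can happen when the server rejects the default urllib user-agent. Would changing the fetch step to pass a User-Agent header, roughly like the sketch below, be the right fix? (The header value is just an example I copied from a browser, not something I am sure about.)

req = urllib.request.Request(page, headers={'User-Agent': 'Mozilla/5.0'})
intpage = urllib.request.urlopen(req).read()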