webcrawler.py

#!/usr/bin/env python
"""
This is a simple web "crawler" that fetches a bunch of urls using a pool to
control the number of outbound connections. It has as many simultaneously open
connections as coroutines in the pool.

The prints in the body of the fetch function are there to demonstrate that the
requests are truly made in parallel.
"""
import eventlet
from eventlet.green.urllib.request import urlopen


urls = [
    "https://www.google.com/intl/en_ALL/images/logo.gif",
    "http://python.org/images/python-logo.gif",
    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
]


def fetch(url):
    print("opening", url)
    body = urlopen(url).read()
    print("done with", url)
    return url, body


pool = eventlet.GreenPool(200)
for url, body in pool.imap(fetch, urls):
    print("got body from", url, "of length", len(body))