#!/usr/bin/env python
"""
This is a simple web "crawler" that fetches a bunch of urls using a pool to
control the number of outbound connections. It has as many simultaneously open
connections as coroutines in the pool.

The prints in the body of the fetch function are there to demonstrate that the
requests are truly made in parallel.
"""
import eventlet
from eventlet.green.urllib.request import urlopen  # cooperative (green) version of urllib.request
urls = [
    "https://www.google.com/intl/en_ALL/images/logo.gif",
    "http://python.org/images/python-logo.gif",
    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
]


def fetch(url):
    print("opening", url)
    body = urlopen(url).read()
- print("done with", url)
- return url, body
- pool = eventlet.GreenPool(200)
- for url, body in pool.imap(fetch, urls):
- print("got body from", url, "of length", len(body))
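If you want to watch the pool actually throttle the crawl, one quick variant (a minimal sketch, not part of the example above) shrinks the pool to two green threads and uses pool.spawn plus GreenThread.wait() instead of imap; the third spawn call then blocks until one of the first two fetches finishes.

#!/usr/bin/env python
"""Sketch: the same crawl, but with at most two requests in flight at once."""
import eventlet
from eventlet.green.urllib.request import urlopen

urls = [
    "https://www.google.com/intl/en_ALL/images/logo.gif",
    "http://python.org/images/python-logo.gif",
    "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
]


def fetch(url):
    print("opening", url)
    body = urlopen(url).read()
    print("done with", url)
    return url, len(body)


pool = eventlet.GreenPool(2)                     # only two green threads may run at a time
threads = [pool.spawn(fetch, u) for u in urls]   # the third spawn blocks until a slot frees
for gt in threads:
    url, size = gt.wait()                        # wait() returns fetch's return value
    print("got body from", url, "of length", size)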