def get_tweet_count(term):
    """Count recent tweets matching ``term`` via the Twitter search API.

    Pages through results 100 at a time, stopping when a page comes back
    short (last page) or after 15 pages (the API's historical page cap).

    :param term: search query string; URL-encoded before being sent.
    :returns: total number of tweets seen across all fetched pages.
    :raises urllib2.URLError: if the HTTP request fails.

    NOTE(review): this targets the long-retired v1 search endpoint and
    Python 2 libraries (``urllib.quote``/``urllib2``) — kept as-is to
    match the rest of the file, but it will not work against today's
    Twitter/X API.
    """
    total_tweet_count = 0
    page = 1
    while True:
        # BUG FIX: the original split this concatenation into two separate
        # statements, so the query parameters were never appended to the
        # URL (the orphaned "+ ..." line raised TypeError). Parenthesize
        # so it is one expression.
        url = ('http://search.twitter.com/search.json?q='
               + urllib.quote(term) + '&rpp=100&page=' + str(page))
        response = urllib2.urlopen(url)
        try:
            json_content = response.read()
        finally:
            # The original leaked the HTTP connection; close it explicitly.
            response.close()
        tweets = json.loads(json_content)['results']
        total_tweet_count += len(tweets)
        # Are we at the last page or have we run out of pages?
        if len(tweets) < 100 or page >= 15:
            break
        # (Removed: the original computed max_id from tweets[0] here but
        # never used it — dead code.)
        page += 1
        # Wait so twitter doesn't get annoyed with us
        time.sleep(1)
    return total_tweet_count
# This script was adapted from code found on GitHub.