I am using Spyder on a Mac and the Python version on Spyder is 2.7. I had been using the following code a few months ago to scrape tweets, but now I find that it no longer works. First, I could no longer use:
from urllib.request import url open
and now use
from urllib2 import url open
However, I am unable to run the code below and get the following error: "with open('%s_tweets.csv' % screen_name, 'w', newline='', encoding='utf-8-sig') as f: TypeError: file() takes at most 3 arguments (4 given)"
import sys
from urllib2 import urlopen
default_encoding = 'utf-8'
import tweepy #https://github.com/tweepy/tweepy
import csv
#Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
screenNamesList = []
def redirect(url):
page = urlopen(url)
return page.geturl()
def get_all_tweets(screen_name):
#Twitter only allows access to a users most recent 3240 tweets with this method
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth, wait_on_rate_limit = True)
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name = screen_name,count=200)
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
#print "getting tweets before %s" % (oldest)
#all subsiquent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#print "...%s tweets downloaded so far" % (len(alltweets))
#transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str, tweet.created_at, tweet.text, tweet.retweet_count, tweet.coordinates, tweet.favorite_count, tweet.author.followers_count, tweet.author.description, tweet.author.location, tweet.author.name] for tweet in alltweets]
#write the csv
with open('%s_tweets.csv' % screen_name, 'w', newline='', encoding='utf-8-sig') as f:
writer = csv.writer(f)
writer.writerow(["id", "created_at", "text", "retweet_count", "coordinates", "favorite_count", "followers_count", "description", "location", "name"])
writer.writerows(outtweets)
pass
if __name__ == '__main__':
#pass in the username of the account you want to download
for i, user in enumerate(screenNamesList):
get_all_tweets(screenNamesList[i])
i+=1
This code is intended for python 3, where open
gets new parameters:
In python 2, there are only 3 parameters possible:
open(name[, mode[, buffering]])
buffering
isn't what you want. The others are nowhere to be found.
You can workaround this by using
with open('%s_tweets.csv' % screen_name, 'wb') as f:
opening the handle in binary fixes the "blank line" bug of csv
module. With python 3 (only the "old" versions, newest version doesn't need that), you have to pass newline=""
since you cannot open a csv file as binary.
To handle both encoding and newline, you could just do as described here:
from io import open
and leave the rest of your code unchanged. Well, almost, you have to prefix unicode for your title like this:
writer.writerow([u"id", u"created_at", u"text", u"retweet_count", u"coordinates", u"favorite_count", u"followers_count", u"description", u"location", u"name"])