1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import facebook
import pandas as pd
import requests

# Facebook Graph API access token — fill in before running.
token = ''

# Page fields to request, assembled into the comma-separated string the
# Graph API expects. Fields commented out below are available on the Page
# node but are not requested here.
fields = ','.join((
    'id',
    'name',
    'username',
    'about',
    'website',
    'link',
    'category',
    'checkins',
    'description',
    'fan_count',
    'talking_about_count',
    'price_range',
    'is_always_open',
    # 'featured_video',
    # 'founded',
    # 'general_info',
    # 'hours',
    # 'location',
    # 'mission',
    # 'phone',
    # 'products',
    # 'restaurant_services',
    # 'single_line_address',
))

# Page categories to drop from the search results. A set gives O(1)
# membership tests in the filtering step that consumes it (`not in`).
exclude = {
    'Apartment & Condo Building',
    'Home',
    'Hotel',
    'Hotel & Lodging',
    'Inn',
    "Men's Clothing Store",
    'Night Market',
    'Parking Garage / Lot',
    'Performance & Event Venue',
    'Real Estate',
    'Region',
    'Shopping & Retail',
    'Tour Agency',
    'Vacation Home Rental',
}

# Graph API client; v2.10 matches the field names requested above.
graph = facebook.GraphAPI(access_token=token, version='2.10')

# Search for places within 1 km of the given coordinates
# (the query string is the night-market name; keep it verbatim).
fc = graph.search(type='place', q='逢甲+夜市',
                  center="24.1759238,120.6456042", distance=1000,
                  fields=fields)

pagelist = []

# Follow the paging cursors until every result page has been collected.
# NOTE(review): the original loop had no break when 'paging' was absent
# from the response, which looped forever re-appending the same data.
while True:
    if 'data' in fc:
        pagelist.extend(fc['data'])
    else:
        break
    next_url = fc.get('paging', {}).get('next')
    if not next_url:
        break
    fc = requests.get(next_url).json()

# Drop places flagged as always open, then drop excluded categories.
# The Graph API omits fields that have no value, so use .get() instead
# of indexing to avoid KeyError on pages missing these fields.
pagelist = [page for page in pagelist if page.get('is_always_open') is not True]
pagelist = [page for page in pagelist if page.get('category') not in exclude]


writer = pd.ExcelWriter('fb.xlsx')
fcdata = pd.DataFrame(pagelist)
fcdata.to_excel(writer,'pages')
writer.save()