buildDatabase.sh
#!/bin/bash
set -euo pipefail
# Force bytewise (C locale) collation so that sort order is consistent across the
# UNIX commands used below.
export LC_ALL=C
# By default, the latest Wikipedia dump will be downloaded. If a download date in the format
# YYYYMMDD is provided as the first argument, it will be used instead.
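# Example invocation with an explicit (hypothetical) dump date:
#   ./buildDatabase.sh 20240101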
if [[ $# -eq 0 ]]; then
  DOWNLOAD_DATE=$(wget -q -O- https://dumps.wikimedia.org/enwiki/ | grep -Po '\d{8}' | sort | tail -n1)
else
  if [ ${#1} -ne 8 ]; then
    echo "[ERROR] Invalid download date provided: $1"
    exit 1
  else
    DOWNLOAD_DATE=$1
  fi
fi
ROOT_DIR=$(pwd)
OUT_DIR="dump"
DOWNLOAD_URL="https://dumps.wikimedia.org/enwiki/$DOWNLOAD_DATE"
TORRENT_URL="https://dump-torrents.toolforge.org/enwiki/$DOWNLOAD_DATE"
SHA1SUM_FILENAME="enwiki-$DOWNLOAD_DATE-sha1sums.txt"
REDIRECTS_FILENAME="enwiki-$DOWNLOAD_DATE-redirect.sql.gz"
PAGES_FILENAME="enwiki-$DOWNLOAD_DATE-page.sql.gz"
LINKS_FILENAME="enwiki-$DOWNLOAD_DATE-pagelinks.sql.gz"
# Make the output directory if it doesn't already exist and move to it
mkdir -p "$OUT_DIR"
pushd "$OUT_DIR" > /dev/null
echo "[INFO] Download date: $DOWNLOAD_DATE"
echo "[INFO] Download URL: $DOWNLOAD_URL"
echo "[INFO] Output directory: $OUT_DIR"
echo
##############################
# DOWNLOAD WIKIPEDIA DUMPS #
##############################
function download_file() {
  if [ ! -f "$2" ]; then
    echo
    # Prefer the torrent mirror when aria2c is available (skipped for the small sha1sums file).
    if [ "$1" != sha1sums ] && command -v aria2c > /dev/null; then
      echo "[INFO] Downloading $1 file via torrent"
      time aria2c --summary-interval=0 --console-log-level=warn --seed-time=0 \
        "$TORRENT_URL/$2.torrent" 2>&1 | grep -v "ERROR\|Exception" || true
    fi
    # Fall back to a direct download if the torrent did not produce the file.
    if [ ! -f "$2" ]; then
      echo "[INFO] Downloading $1 file via wget"
      time wget --progress=dot:giga "$DOWNLOAD_URL/$2"
    fi
    if [ "$1" != sha1sums ]; then
      echo
      echo "[INFO] Verifying SHA-1 hash for $1 file"
      # Check the pipeline's status inline: with "set -e -o pipefail", a separate "$?" test
      # after a failing pipeline would never be reached.
      time grep "$2" "$SHA1SUM_FILENAME" | sha1sum -c || {
        echo
        echo "[ERROR] Downloaded $1 file has incorrect SHA-1 hash"
        rm "$2"
        exit 1
      }
    fi
  else
    echo "[WARN] Already downloaded $1 file"
  fi
}
download_file "sha1sums" $SHA1SUM_FILENAME
download_file "redirects" $REDIRECTS_FILENAME
download_file "pages" $PAGES_FILENAME
download_file "links" $LINKS_FILENAME
##########################
# TRIM WIKIPEDIA DUMPS #
##########################
if [ ! -f redirects.txt.gz ]; then
  echo
  echo "[INFO] Trimming redirects file"
  # Unzip
  # Remove all lines that don't start with INSERT INTO...
  # Split into individual records
  # Only keep records in namespace 0
  # Replace the namespace field with a tab
  # Remove everything from the target page title's closing apostrophe onward
  # Zip into output file
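  # Rough illustration of the transformation below (the exact dump column layout is an
  # assumption): a record like 123,0,'Target_title','','' becomes "123<TAB>Target_title".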
  time pigz -dc "$REDIRECTS_FILENAME" \
    | sed -n 's/^INSERT INTO `redirect` VALUES (//p' \
    | sed -e 's/),(/\'$'\n/g' \
    | egrep "^[0-9]+,0," \
    | sed -e $"s/,0,'/\t/g" \
    | sed -e "s/','.*//g" \
    | pigz --fast > redirects.txt.gz.tmp
  mv redirects.txt.gz.tmp redirects.txt.gz
else
  echo "[WARN] Already trimmed redirects file"
fi
if [ ! -f pages.txt.gz ]; then
  echo
  echo "[INFO] Trimming pages file"
  # Unzip
  # Remove all lines that don't start with INSERT INTO...
  # Split into individual records
  # Only keep records in namespace 0
  # Replace the namespace field with a tab
  # Splice out the page title and whether or not the page is a redirect
  # Zip into output file
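  # Rough illustration (column layout assumed): a record like 456,0,'Some_title','',1,...
  # becomes "456<TAB>Some_title<TAB>1", where the final 0/1 marks whether the page is a redirect.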
  time pigz -dc "$PAGES_FILENAME" \
    | sed -n 's/^INSERT INTO `page` VALUES (//p' \
    | sed -e 's/),(/\'$'\n/g' \
    | egrep "^[0-9]+,0," \
    | sed -e $"s/,0,'/\t/" \
    | sed -e $"s/',[^,]*,\([01]\).*/\t\1/" \
    | pigz --fast > pages.txt.gz.tmp
  mv pages.txt.gz.tmp pages.txt.gz
else
  echo "[WARN] Already trimmed pages file"
fi
if [ ! -f links.txt.gz ]; then
  echo
  echo "[INFO] Trimming links file"
  # Unzip
  # Remove all lines that don't start with INSERT INTO...
  # Split into individual records
  # Only keep records in namespace 0
  # Replace the namespace field with a tab
  # Remove everything from the target page title's closing apostrophe onward
  # Zip into output file
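  # Rough illustration (column layout assumed; it can differ between dump dates): a record
  # like 789,0,'Target_title',0 becomes "789<TAB>Target_title".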
  time pigz -dc "$LINKS_FILENAME" \
    | sed -n 's/^INSERT INTO `pagelinks` VALUES (//p' \
    | sed -e 's/),(/\'$'\n/g' \
    | egrep "^[0-9]+,0,.*,0$" \
    | sed -e $"s/,0,'/\t/g" \
    | sed -e "s/',0//g" \
    | pigz --fast > links.txt.gz.tmp
  mv links.txt.gz.tmp links.txt.gz
else
  echo "[WARN] Already trimmed links file"
fi
###########################################
# REPLACE TITLES AND REDIRECTS IN FILES #
###########################################
if [ ! -f redirects.with_ids.txt.gz ]; then
  echo
  echo "[INFO] Replacing titles in redirects file"
  time python "$ROOT_DIR/replace_titles_in_redirects_file.py" pages.txt.gz redirects.txt.gz \
    | sort -S 100% -t $'\t' -k 1n,1n \
    | pigz --fast > redirects.with_ids.txt.gz.tmp
  mv redirects.with_ids.txt.gz.tmp redirects.with_ids.txt.gz
else
  echo "[WARN] Already replaced titles in redirects file"
fi
if [ ! -f links.with_ids.txt.gz ]; then
  echo
  echo "[INFO] Replacing titles and redirects in links file"
  time python "$ROOT_DIR/replace_titles_and_redirects_in_links_file.py" pages.txt.gz redirects.with_ids.txt.gz links.txt.gz \
    | pigz --fast > links.with_ids.txt.gz.tmp
  mv links.with_ids.txt.gz.tmp links.with_ids.txt.gz
else
  echo "[WARN] Already replaced titles and redirects in links file"
fi
if [ ! -f pages.pruned.txt.gz ]; then
  echo
  echo "[INFO] Pruning pages which are marked as redirects but have no redirect target"
  # Write to a temporary file and rename, like the other steps, so an interrupted run
  # is not mistaken for a completed one.
  time python "$ROOT_DIR/prune_pages_file.py" pages.txt.gz redirects.with_ids.txt.gz \
    | pigz --fast > pages.pruned.txt.gz.tmp
  mv pages.pruned.txt.gz.tmp pages.pruned.txt.gz
else
  echo "[WARN] Already pruned pages which are marked as redirects but have no redirect target"
fi
#####################
# SORT LINKS FILE #
#####################
if [ ! -f links.sorted_by_source_id.txt.gz ]; then
  echo
  echo "[INFO] Sorting links file by source page ID"
  time pigz -dc links.with_ids.txt.gz \
    | sort -S 80% -t $'\t' -k 1n,1n \
    | uniq \
    | pigz --fast > links.sorted_by_source_id.txt.gz.tmp
  mv links.sorted_by_source_id.txt.gz.tmp links.sorted_by_source_id.txt.gz
else
  echo "[WARN] Already sorted links file by source page ID"
fi
if [ ! -f links.sorted_by_target_id.txt.gz ]; then
  echo
  echo "[INFO] Sorting links file by target page ID"
  time pigz -dc links.with_ids.txt.gz \
    | sort -S 80% -t $'\t' -k 2n,2n \
    | uniq \
    | pigz --fast > links.sorted_by_target_id.txt.gz.tmp
  mv links.sorted_by_target_id.txt.gz.tmp links.sorted_by_target_id.txt.gz
else
  echo "[WARN] Already sorted links file by target page ID"
fi
#############################
# GROUP SORTED LINKS FILE #
#############################
if [ ! -f links.grouped_by_source_id.txt.gz ]; then
  echo
  echo "[INFO] Grouping source links file by source page ID"
  time pigz -dc links.sorted_by_source_id.txt.gz \
    | awk -F '\t' '$1==last {printf "|%s",$2; next} NR>1 {print "";} {last=$1; printf "%s\t%s",$1,$2;} END{print "";}' \
    | pigz --fast > links.grouped_by_source_id.txt.gz.tmp
  mv links.grouped_by_source_id.txt.gz.tmp links.grouped_by_source_id.txt.gz
else
  echo "[WARN] Already grouped source links file by source page ID"
fi
if [ ! -f links.grouped_by_target_id.txt.gz ]; then
  echo
  echo "[INFO] Grouping target links file by target page ID"
  # Write to a temporary file and rename, consistent with the source-grouping step above.
  time pigz -dc links.sorted_by_target_id.txt.gz \
    | awk -F '\t' '$2==last {printf "|%s",$1; next} NR>1 {print "";} {last=$2; printf "%s\t%s",$2,$1;} END{print "";}' \
    | pigz --fast > links.grouped_by_target_id.txt.gz.tmp
  mv links.grouped_by_target_id.txt.gz.tmp links.grouped_by_target_id.txt.gz
else
  echo "[WARN] Already grouped target links file by target page ID"
fi
################################
# COMBINE GROUPED LINKS FILES #
################################
if [ ! -f links.with_counts.txt.gz ]; then
  echo
  echo "[INFO] Combining grouped links files"
  time python "$ROOT_DIR/combine_grouped_links_files.py" links.grouped_by_source_id.txt.gz links.grouped_by_target_id.txt.gz \
    | pigz --fast > links.with_counts.txt.gz.tmp
  mv links.with_counts.txt.gz.tmp links.with_counts.txt.gz
else
  echo "[WARN] Already combined grouped links files"
fi
############################
# CREATE SQLITE DATABASE #
############################
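# Each create*Table.sql script is executed via sqlite3's ".read" while the decompressed dump
# is piped to stdin; presumably (an assumption about those scripts' contents) they create the
# table and then ".import" the tab-separated rows from /dev/stdin.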
if [ ! -f sdow.sqlite ]; then
  echo
  echo "[INFO] Creating redirects table"
  time pigz -dc redirects.with_ids.txt.gz | sqlite3 sdow.sqlite ".read $ROOT_DIR/../sql/createRedirectsTable.sql"
  echo
  echo "[INFO] Creating pages table"
  time pigz -dc pages.pruned.txt.gz | sqlite3 sdow.sqlite ".read $ROOT_DIR/../sql/createPagesTable.sql"
  echo
  echo "[INFO] Creating links table"
  time pigz -dc links.with_counts.txt.gz | sqlite3 sdow.sqlite ".read $ROOT_DIR/../sql/createLinksTable.sql"
  echo
  echo "[INFO] Compressing SQLite file"
  time pigz --best --keep sdow.sqlite
else
  echo "[WARN] Already created SQLite database"
fi
echo
echo "[INFO] All done!"