=head1 NAME

CFPlus::DB - async. database and filesystem access for cfplus

=head1 SYNOPSIS
6 | |
6 | |
7 | use CFPlus::DB; |
7 | use CFPlus::DB; |
8 | |
8 | |
… | |
… | |
16 | |
16 | |
17 | use strict; |
17 | use strict; |
18 | use utf8; |
18 | use utf8; |
19 | |
19 | |
20 | use Carp (); |
20 | use Carp (); |
21 | use AnyEvent (); |
|
|
22 | use Storable (); # finally |
21 | use Storable (); |
|
|
22 | use Config; |
|
|
23 | |
|
|
24 | use CFPlus; |
|
|
25 | |
|
|
26 | our $DB_HOME = "$Crossfire::VARDIR/cfplus-$BerkeleyDB::db_version-$Config{archname}"; |
|
|
27 | |
|
|
28 | sub path_of_res($) { |
|
|
29 | utf8::downgrade $_[0]; # bug in unpack "H*" |
|
|
30 | "$DB_HOME/res-data-" . unpack "H*", $_[0] |
|
|
31 | } |
23 | |
32 | |
24 | sub sync { |
33 | sub sync { |
25 | # for debugging |
34 | # for debugging |
26 | #CFPlus::DB::Server::req (sync => sub { }); |
35 | #CFPlus::DB::Server::req (sync => sub { }); |
27 | CFPlus::DB::Server::sync (); |
36 | CFPlus::DB::Server::sync (); |
28 | } |
37 | } |
29 | |
38 | |
|
|
39 | sub exists($$$) { |
|
|
40 | CFPlus::DB::Server::req (exists => @_); |
|
|
41 | } |
|
|
42 | |
30 | sub get($$$) { |
43 | sub get($$$) { |
31 | CFPlus::DB::Server::req (get => @_); |
44 | CFPlus::DB::Server::req (get => @_); |
32 | } |
45 | } |
33 | |
46 | |
34 | sub put($$$$) { |
47 | sub put($$$$) { |
35 | CFPlus::DB::Server::req (put => @_); |
48 | CFPlus::DB::Server::req (put => @_); |
36 | } |
49 | } |
37 | |
50 | |
|
|
51 | sub unlink($$) { |
|
|
52 | CFPlus::DB::Server::req (unlink => @_); |
|
|
53 | } |
|
|
54 | |
|
|
55 | sub read_file($$) { |
|
|
56 | CFPlus::DB::Server::req (read_file => @_); |
|
|
57 | } |
|
|
58 | |
|
|
59 | sub write_file($$$) { |
|
|
60 | CFPlus::DB::Server::req (write_file => @_); |
|
|
61 | } |
|
|
62 | |
|
|
63 | sub prefetch_file($$$) { |
|
|
64 | CFPlus::DB::Server::req (prefetch_file => @_); |
|
|
65 | } |
|
|
66 | |
|
|
67 | sub logprint($$$) { |
|
|
68 | CFPlus::DB::Server::req (logprint => @_); |
|
|
69 | } |
|
|
70 | |
38 | our $tilemap; |
71 | our $tilemap; |
39 | |
72 | |
40 | sub get_tile_id_sync($) { |
73 | sub get_tile_id_sync($) { |
41 | my ($hash) = @_; |
74 | my ($name) = @_; |
42 | |
75 | |
43 | # fetch the full face table first |
76 | # fetch the full face table first |
44 | unless ($tilemap) { |
77 | unless ($tilemap) { |
45 | CFPlus::DB::Server::req (table => facemap => sub { $tilemap = $_[0] }); |
78 | CFPlus::DB::Server::req (table => facemap => sub { |
|
|
79 | $tilemap = $_[0]; |
|
|
80 | delete $tilemap->{id}; |
|
|
81 | my %maptile = reverse %$tilemap;#d# |
|
|
82 | if ((scalar keys %$tilemap) != (scalar keys %maptile)) {#d# |
|
|
83 | $tilemap = { };#d# |
|
|
84 | CFPlus::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d# |
|
|
85 | }#d# |
|
|
86 | }); |
46 | sync; |
87 | sync; |
47 | } |
88 | } |
48 | |
89 | |
49 | $tilemap->{$hash} ||= do { |
90 | $tilemap->{$name} ||= do { |
50 | my $id; |
91 | my $id; |
51 | CFPlus::DB::Server::req (get_tile_id => $hash, sub { $id = $_[0] }); |
92 | CFPlus::DB::Server::req (get_tile_id => $name, sub { $id = $_[0] }); |
52 | sync; |
93 | sync; |
53 | $id |
94 | $id |
54 | } |
95 | } |
55 | } |
96 | } |
56 | |
97 | |
57 | package CFPlus::DB::Server; |
98 | package CFPlus::DB::Server; |
58 | |
99 | |
59 | use strict; |
100 | use strict; |
60 | |
101 | |
|
|
102 | use EV (); |
61 | use Fcntl; |
103 | use Fcntl; |
62 | use BerkeleyDB; |
104 | use BerkeleyDB; |
63 | |
105 | |
64 | our $DB_HOME = "$Crossfire::VARDIR/cfplus"; |
|
|
65 | our $DB_ENV; |
106 | our $DB_ENV; |
66 | our $DB_STATE; |
107 | our $DB_STATE; |
67 | our %DB_TABLE; |
108 | our %DB_TABLE; |
68 | |
109 | |
69 | sub open_db { |
110 | sub open_db { |
… | |
… | |
77 | -Cachesize => 8_000_000, |
118 | -Cachesize => 8_000_000, |
78 | -ErrFile => "$DB_HOME/errorlog.txt", |
119 | -ErrFile => "$DB_HOME/errorlog.txt", |
79 | # -ErrPrefix => "DATABASE", |
120 | # -ErrPrefix => "DATABASE", |
80 | -Verbose => 1, |
121 | -Verbose => 1, |
81 | -Flags => DB_CREATE | DB_RECOVER | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_TXN | $recover, |
122 | -Flags => DB_CREATE | DB_RECOVER | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_TXN | $recover, |
82 | -SetFlags => DB_AUTO_COMMIT | DB_LOG_AUTOREMOVE, |
123 | -SetFlags => DB_AUTO_COMMIT | DB_LOG_AUTOREMOVE | DB_TXN_WRITE_NOSYNC, |
83 | or die "unable to create/open database home $DB_HOME: $BerkeleyDB::Error"; |
124 | or die "unable to create/open database home $DB_HOME: $BerkeleyDB::Error"; |
84 | |
125 | |
85 | 1 |
126 | 1 |
86 | } |
127 | } |
87 | |
128 | |
… | |
… | |
95 | -Env => $DB_ENV, |
136 | -Env => $DB_ENV, |
96 | -Filename => $table, |
137 | -Filename => $table, |
97 | # -Filename => "database", |
138 | # -Filename => "database", |
98 | # -Subname => $table, |
139 | # -Subname => $table, |
99 | -Property => DB_CHKSUM, |
140 | -Property => DB_CHKSUM, |
100 | -Flags => DB_CREATE | DB_UPGRADE, |
141 | -Flags => DB_AUTO_COMMIT | DB_CREATE | DB_UPGRADE, |
101 | or die "unable to create/open database table $_[0]: $BerkeleyDB::Error" |
142 | or die "unable to create/open database table $_[0]: $BerkeleyDB::Error" |
102 | } |
143 | } |
103 | } |
144 | } |
104 | |
|
|
105 | our $SYNC_INTERVAL = 6; |
|
|
106 | |
145 | |
107 | our %CB; |
146 | our %CB; |
108 | our $FH; |
147 | our $FH; |
109 | our $ID = "aaa0"; |
148 | our $ID = "aaa0"; |
110 | our ($fh_r_watcher, $fh_w_watcher); |
149 | our ($fh_r_watcher, $fh_w_watcher); |
111 | our $sync_timer; |
150 | our $sync_timer; |
112 | our $write_buf; |
151 | our $write_buf; |
113 | our $read_buf; |
152 | our $read_buf; |
114 | |
153 | |
|
|
154 | our $SYNC = EV::timer_ns 0, 60, sub { |
|
|
155 | $_[0]->stop; |
|
|
156 | CFPlus::DB::Server::req (sync => sub { }); |
|
|
157 | }; |
|
|
158 | |
115 | sub fh_write { |
159 | sub fh_write { |
116 | my $len = syswrite $FH, $write_buf; |
160 | my $len = syswrite $FH, $write_buf; |
117 | |
161 | |
118 | substr $write_buf, 0, $len, ""; |
162 | substr $write_buf, 0, $len, ""; |
119 | |
163 | |
120 | undef $fh_w_watcher |
164 | $fh_w_watcher->stop |
121 | unless length $write_buf; |
165 | unless length $write_buf; |
122 | } |
166 | } |
123 | |
167 | |
124 | sub fh_read { |
168 | sub fh_read { |
125 | my $status = sysread $FH, $read_buf, 16384, length $read_buf; |
169 | my $status = sysread $FH, $read_buf, 16384, length $read_buf; |
… | |
… | |
161 | |
205 | |
162 | my $id = ++$ID; |
206 | my $id = ++$ID; |
163 | $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args]; |
207 | $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args]; |
164 | $CB{$id} = $cb; |
208 | $CB{$id} = $cb; |
165 | |
209 | |
166 | $fh_w_watcher = AnyEvent->io (fh => $FH, poll => 'w', cb => \&fh_write); |
210 | $fh_w_watcher->start; |
167 | } |
211 | $SYNC->again unless $SYNC->is_active; |
168 | |
|
|
169 | sub sync_tick { |
|
|
170 | req "sync", sub { }; |
|
|
171 | $sync_timer = AnyEvent->timer (after => $SYNC_INTERVAL, cb => \&sync_tick); |
|
|
172 | } |
212 | } |
173 | |
213 | |
174 | sub do_sync { |
214 | sub do_sync { |
175 | $DB_ENV->txn_checkpoint (0, 0, 0); |
215 | $DB_ENV->txn_checkpoint (0, 0, 0); |
176 | () |
216 | () |
177 | } |
217 | } |
178 | |
218 | |
|
|
219 | sub do_exists { |
|
|
220 | my ($db, $key) = @_; |
|
|
221 | |
|
|
222 | utf8::downgrade $key; |
|
|
223 | my $data; |
|
|
224 | (table $db)->db_get ($key, $data) == 0 |
|
|
225 | ? length $data |
|
|
226 | : () |
|
|
227 | } |
|
|
228 | |
179 | sub do_get { |
229 | sub do_get { |
180 | my ($db, $key) = @_; |
230 | my ($db, $key) = @_; |
181 | |
231 | |
|
|
232 | utf8::downgrade $key; |
182 | my $data; |
233 | my $data; |
183 | (table $db)->db_get ($key, $data) == 0 |
234 | (table $db)->db_get ($key, $data) == 0 |
184 | ? $data |
235 | ? $data |
185 | : () |
236 | : () |
186 | } |
237 | } |
187 | |
238 | |
188 | sub do_put { |
239 | sub do_put { |
189 | my ($db, $key, $data) = @_; |
240 | my ($db, $key, $data) = @_; |
190 | |
241 | |
|
|
242 | utf8::downgrade $key; |
|
|
243 | utf8::downgrade $data; |
191 | (table $db)->db_put ($key => $data) |
244 | (table $db)->db_put ($key => $data) |
192 | } |
245 | } |
193 | |
246 | |
194 | sub do_table { |
247 | sub do_table { |
195 | my ($db) = @_; |
248 | my ($db) = @_; |
… | |
… | |
203 | |
256 | |
204 | \%kv |
257 | \%kv |
205 | } |
258 | } |
206 | |
259 | |
207 | sub do_get_tile_id { |
260 | sub do_get_tile_id { |
208 | my ($hash) = @_; |
261 | my ($name) = @_; |
209 | |
262 | |
210 | my $id; |
263 | my $id; |
211 | my $table = table "facemap"; |
264 | my $table = table "facemap"; |
212 | |
265 | |
213 | return $id |
266 | return $id |
214 | if $table->db_get ($hash, $id) == 0; |
267 | if $table->db_get ($name, $id) == 0; |
215 | |
268 | |
216 | for (1..100) { |
269 | for (1..100) { |
217 | my $txn = $DB_ENV->txn_begin; |
270 | my $txn = $DB_ENV->txn_begin; |
218 | my $status = $table->db_get (id => $id); |
271 | my $status = $table->db_get (id => $id); |
219 | if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND) { |
272 | if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND) { |
220 | $id = ($id || 64) + 1; |
273 | $id = ($id || 64) + 1; |
221 | if ($table->db_put (id => $id) == 0 |
274 | if ($table->db_put (id => $id) == 0 |
222 | && $table->db_put ($hash => $id) == 0) { |
275 | && $table->db_put ($name => $id) == 0) { |
223 | $txn->txn_commit; |
276 | $txn->txn_commit; |
224 | |
277 | |
225 | return $id; |
278 | return $id; |
226 | } |
279 | } |
227 | } |
280 | } |
228 | $txn->txn_abort; |
281 | $txn->txn_abort; |
|
|
282 | select undef, undef, undef, 0.01 * rand; |
229 | } |
283 | } |
230 | |
284 | |
231 | die "maximum number of transaction retries reached - database problems?"; |
285 | die "maximum number of transaction retries reached - database problems?"; |
|
|
286 | } |
|
|
287 | |
|
|
288 | sub do_unlink { |
|
|
289 | unlink $_[0]; |
|
|
290 | } |
|
|
291 | |
|
|
292 | sub do_read_file { |
|
|
293 | my ($path) = @_; |
|
|
294 | |
|
|
295 | utf8::downgrade $path; |
|
|
296 | open my $fh, "<:raw", $path |
|
|
297 | or return; |
|
|
298 | sysread $fh, my $buf, -s $fh; |
|
|
299 | |
|
|
300 | $buf |
|
|
301 | } |
|
|
302 | |
|
|
303 | sub do_write_file { |
|
|
304 | my ($path, $data) = @_; |
|
|
305 | |
|
|
306 | utf8::downgrade $path; |
|
|
307 | utf8::downgrade $data; |
|
|
308 | open my $fh, ">:raw", $path |
|
|
309 | or return; |
|
|
310 | syswrite $fh, $data; |
|
|
311 | close $fh; |
|
|
312 | |
|
|
313 | 1 |
|
|
314 | } |
|
|
315 | |
|
|
316 | sub do_prefetch_file { |
|
|
317 | my ($path, $size) = @_; |
|
|
318 | |
|
|
319 | utf8::downgrade $path; |
|
|
320 | open my $fh, "<:raw", $path |
|
|
321 | or return; |
|
|
322 | sysread $fh, my $buf, $size; |
|
|
323 | |
|
|
324 | 1 |
|
|
325 | } |
|
|
326 | |
|
|
327 | our %LOG_FH; |
|
|
328 | |
|
|
329 | sub do_logprint { |
|
|
330 | my ($path, $line) = @_; |
|
|
331 | |
|
|
332 | $LOG_FH{$path} ||= do { |
|
|
333 | open my $fh, ">>:utf8", $path |
|
|
334 | or warn "Couldn't open logfile $path: $!"; |
|
|
335 | |
|
|
336 | $fh->autoflush (1); |
|
|
337 | |
|
|
338 | $fh |
|
|
339 | }; |
|
|
340 | |
|
|
341 | my ($sec, $min, $hour, $mday, $mon, $year) = localtime time; |
|
|
342 | |
|
|
343 | my $ts = sprintf "%04d-%02d-%02d %02d:%02d:%02d", |
|
|
344 | $year + 1900, $mon + 1, $mday, $hour, $min, $sec; |
|
|
345 | |
|
|
346 | print { $LOG_FH{$path} } "$ts $line\n" |
232 | } |
347 | } |
233 | |
348 | |
234 | sub run { |
349 | sub run { |
235 | ($FH, my $fh) = CFPlus::socketpipe; |
350 | ($FH, my $fh) = CFPlus::socketpipe; |
236 | |
351 | |
… | |
… | |
238 | my $oldfh = select $fh; $| = 1; select $oldfh; |
353 | my $oldfh = select $fh; $| = 1; select $oldfh; |
239 | |
354 | |
240 | my $pid = fork; |
355 | my $pid = fork; |
241 | |
356 | |
242 | if (defined $pid && !$pid) { |
357 | if (defined $pid && !$pid) { |
|
|
358 | local $SIG{QUIT}; |
243 | local $SIG{__DIE__}; |
359 | local $SIG{__DIE__}; |
|
|
360 | local $SIG{__WARN__}; |
244 | eval { |
361 | eval { |
245 | close $FH; |
362 | close $FH; |
246 | |
363 | |
247 | unless (eval { open_db }) { |
364 | unless (eval { open_db }) { |
248 | File::Path::rmtree $DB_HOME; |
365 | eval { File::Path::rmtree $DB_HOME }; |
249 | open_db; |
366 | open_db; |
250 | } |
367 | } |
251 | |
368 | |
252 | while () { |
369 | while () { |
253 | 4 == read $fh, my $len, 4 |
370 | 4 == read $fh, my $len, 4 |
… | |
… | |
261 | my ($id, $type, @args) = @$req; |
378 | my ($id, $type, @args) = @$req; |
262 | my $cb = CFPlus::DB::Server->can ("do_$type") |
379 | my $cb = CFPlus::DB::Server->can ("do_$type") |
263 | or die "$type: unknown database request type\n"; |
380 | or die "$type: unknown database request type\n"; |
264 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
381 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
265 | (syswrite $fh, $res) == length $res |
382 | (syswrite $fh, $res) == length $res |
266 | or die; |
383 | or die "DB::write: $!"; |
267 | } |
384 | } |
268 | }; |
385 | }; |
269 | |
386 | |
270 | my $error = $@; |
387 | my $error = $@; |
271 | |
388 | |
272 | eval { |
389 | eval { |
|
|
390 | $DB_ENV->txn_checkpoint (0, 0, 0); |
|
|
391 | |
273 | undef %DB_TABLE; |
392 | undef %DB_TABLE; |
274 | undef $DB_ENV; |
393 | undef $DB_ENV; |
275 | |
394 | |
276 | Storable::store_fd [die => $error], $fh; |
395 | Storable::store_fd [die => $error], $fh; |
277 | }; |
396 | }; |
278 | |
397 | |
279 | CFPlus::_exit 0; |
398 | CFPlus::_exit 0; |
280 | } |
399 | } |
281 | |
400 | |
282 | close $fh; |
401 | close $fh; |
283 | fcntl $FH, F_SETFL, O_NONBLOCK; |
402 | CFPlus::fh_nonblocking $FH, 1; |
284 | |
403 | |
285 | $CB{die} = sub { die shift }; |
404 | $CB{die} = sub { die shift }; |
286 | |
405 | |
287 | $fh_r_watcher = AnyEvent->io (fh => $FH, poll => 'r', cb => \&fh_read); |
406 | $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read; |
|
|
407 | $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write; |
|
|
408 | $SYNC->again unless $SYNC->is_active; |
|
|
409 | } |
288 | |
410 | |
289 | sync_tick; |
411 | sub stop { |
|
|
412 | close $FH; |
290 | } |
413 | } |
291 | |
414 | |
292 | 1; |
415 | 1; |
293 | |
416 | |
294 | =back |
417 | =back |