=head1 NAME

DC::DB - async. database and filesystem access for deliantra

=head1 SYNOPSIS

 use DC::DB;

=head1 DESCRIPTION

=over 4

=cut
14 | |
14 | |
15 | package CFPlus::DB; |
15 | package DC::DB; |
16 | |
16 | |
17 | use strict; |
17 | use strict; |
18 | use utf8; |
18 | use utf8; |
19 | |
19 | |
|
|
20 | use File::Path (); |
20 | use Carp (); |
21 | use Carp (); |
21 | use Storable (); |
22 | use Storable (); |
|
|
23 | use AnyEvent::Util (); |
22 | use Config; |
24 | use Config; |
23 | use BDB; |
25 | use BDB; |
|
|
26 | use Fcntl (); |
24 | |
27 | |
25 | use CFPlus; |
28 | use DC; |
26 | |
29 | |
# Database directory names encode the BDB major.minor version and the
# architecture, because on-disk environments are not portable across either.
# $ODBDIR is the old ("cfplus") name, kept only so we can migrate from it.
our $ODBDIR = "cfplus-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";
our $DBDIR  = "client-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";

our $DB_HOME = "$Deliantra::VARDIR/$DBDIR";

# tile ids below this value are reserved for internal use
sub FIRST_TILE_ID () { 64 }

unless (-d $DB_HOME) {
   # try to migrate an existing database from one of the old locations,
   # otherwise create a fresh directory.
   if (-d "$Deliantra::VARDIR/$ODBDIR") {
      rename "$Deliantra::VARDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::VARDIR/$ODBDIR to $DB_HOME\n";
   } elsif (-d "$Deliantra::OLDDIR/$ODBDIR") {
      # bug fix: this used to rename "$Deliantra::OLDDIR/$DBDIR", i.e. a
      # different directory than the one whose existence was just tested,
      # so migration from the old location could never succeed.
      rename "$Deliantra::OLDDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::OLDDIR/$ODBDIR to $DB_HOME\n";
   } else {
      File::Path::mkpath [$DB_HOME]
         or die "unable to create database directory $DB_HOME: $!";
   }
}

# keep bdb from monopolising the event loop, and serialise requests
BDB::max_poll_time 0.03;
BDB::max_parallel 1;
28 | |
51 | |
# Shared database state for this module.
our $DB_ENV;    # the BDB environment handle, set by try_open_db
our $DB_ENV_FH; # __db.register handle - must stay open while $DB_ENV lives
our $DB_STATE;  # NOTE(review): not referenced in the visible code - confirm use
our %DB_TABLE;  # cache of opened table handles, keyed by table name
our $TILE_SEQ;  # BDB sequence used to allocate tile ids
32 | |
57 | |
|
|
# Return the names of all database files inside $DB_HOME, skipping
# dotfiles, BDB transaction logs ("log.*") and internal files ("_*").
# Returns the empty list when the directory cannot be opened.
sub all_databases {
   opendir my $dirh, $DB_HOME
      or return;

   my @entries = readdir $dirh;
   grep { !/^(?:\.|log\.|_)/ } @entries
}
|
|
64 | |
|
|
# Verify all databases in the given environment, but only when we are the
# sole process registered with it. Returns true on success, false (early
# return) when a database fails verification, and dies on lock/open errors.
sub try_verify_env($) {
   my ($env) = @_;

   # serialise verification against other client processes via a lock file
   open my $lock, "+>", "$DB_HOME/__lock"
      or die "__lock: $!";

   flock $lock, &Fcntl::LOCK_EX
      or die "flock: $!";

   # We look at the __db.register env file that has been created by now
   # and check the number of registered processes - if there is only one,
   # we verify all databases, otherwise we skip this.
   # We MUST NOT close this filehandle as long as we keep the env open,
   # as closing it would destroy the record locks on it.
   open $DB_ENV_FH, "<", "$DB_HOME/__db.register"
      or die "__db.register: $!";

   # __db.register contains one record per process, with X signifying
   # empty records (of course, this is completely private to bdb...)
   my $live_procs = grep /^[^X]/, <$DB_ENV_FH>;

   if ($live_procs == 1) {
      # if any databases are corrupted, we simply delete all of them
      for my $db_file (all_databases) {
         my $dbh = db_create $env
            or last;

         # a failed verify will panic the environment, which is fine with us
         db_verify $dbh, "$DB_HOME/$db_file";

         return if $!; # nuke database and recreate on verification failure
      }
   }

   # close probably cleans those up, but we also want to run on windows,
   # so better be safe.
   flock $lock, &Fcntl::LOCK_UN
      or die "funlock: $!";

   1
}
|
|
108 | |
# Create, configure and open the BDB environment in $DB_HOME, verify it,
# and install it as $DB_ENV. Dies on any failure; returns true on success.
# The relative order of the BDB configuration calls below matters.
sub try_open_db {
   File::Path::mkpath [$DB_HOME];

   # drop any previous environment before opening a new one
   undef $DB_ENV;
   undef $DB_ENV_FH;

   my $env = db_env_create;

   # route bdb diagnostics to stderr, as verbosely as possible
   $env->set_errfile (\*STDERR);
   $env->set_msgfile (\*STDERR);
   $env->set_verbose (-1, 1);

   $env->set_flags (BDB::AUTO_COMMIT | BDB::REGION_INIT);
   # log auto-removal moved from a flag to log_set_config in bdb 4.7
   $env->set_flags (&BDB::LOG_AUTOREMOVE ) if BDB::VERSION v0, v4.7;
   $env->log_set_config (&BDB::LOG_AUTO_REMOVE) if BDB::VERSION v4.7;

   # do not hang forever on stale locks/transactions
   $env->set_timeout (3, BDB::SET_TXN_TIMEOUT);
   $env->set_timeout (3, BDB::SET_LOCK_TIMEOUT);

   $env->set_cachesize (0, 2048 * 1024, 0);

   db_env_open $env, $DB_HOME,
      BDB::CREATE | BDB::REGISTER | BDB::RECOVER | BDB::INIT_MPOOL | BDB::INIT_LOCK | BDB::INIT_TXN,
      0666;

   $! and die "cannot open database environment $DB_HOME: " . BDB::strerror;

   # now we go through the registered processes; if there is only one, we
   # verify all files to make sure windows didn't corrupt them (as windows does....)
   try_verify_env $env
      or die "database environment failed verification";

   $DB_ENV = $env;

   1
}
53 | |
145 | |
54 | sub table($) { |
146 | sub table($) { |
55 | $DB_TABLE{$_[0]} ||= do { |
147 | $DB_TABLE{$_[0]} ||= do { |
56 | my ($table) = @_; |
148 | my ($table) = @_; |
57 | |
149 | |
58 | $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge; |
150 | $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge; |
59 | |
151 | |
|
|
152 | $DB_ENV#d# |
|
|
153 | or return ::clienterror ("trying to create table $_[0] with empty db_env $DB_ENV" => 1);#d# |
|
|
154 | |
60 | my $db = db_create $DB_ENV; |
155 | my $db = db_create $DB_ENV; |
61 | $db->set_flags (BDB::CHKSUM); |
156 | $db->set_flags (BDB::CHKSUM); |
62 | |
157 | |
63 | db_open $db, undef, $table, undef, BDB::BTREE, |
158 | db_open $db, undef, $table, undef, BDB::BTREE, |
64 | BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666; |
159 | BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666; |
… | |
… | |
69 | } |
164 | } |
70 | } |
165 | } |
71 | |
166 | |
72 | ############################################################################# |
167 | ############################################################################# |
73 | |
168 | |
# Event-loop handles, installed by open_db below.
our $WATCHER; # EV io watcher feeding BDB results into the event loop
our $SYNC;    # EV timer that schedules a delayed txn checkpoint

our $facemap; # in-memory copy of the facemap table (face name => tile id)
88 | |
172 | |
89 | sub exists($$$) { |
173 | sub exists($$$) { |
90 | my ($db, $key, $cb) = @_; |
174 | my ($db, $key, $cb) = @_; |
91 | |
175 | |
92 | my $data; |
176 | my $data; |
… | |
… | |
134 | my ($name, $cb) = @_; |
218 | my ($name, $cb) = @_; |
135 | |
219 | |
136 | my $table = table "facemap"; |
220 | my $table = table "facemap"; |
137 | my $id; |
221 | my $id; |
138 | |
222 | |
139 | db_get $table, undef, $name, $id, 0; |
223 | db_get $table, undef, $name => $id, 0; |
140 | return $cb->($id) unless $!; |
224 | $! or return $cb->($id); |
141 | |
225 | |
142 | for (1..100) { |
226 | unless ($TILE_SEQ) { |
143 | my $txn = $DB_ENV->txn_begin; |
227 | $TILE_SEQ = $table->sequence; |
144 | db_get $table, $txn, id => $id, 0; |
228 | $TILE_SEQ->initial_value (FIRST_TILE_ID); |
|
|
229 | $TILE_SEQ->set_cachesize (0); |
|
|
230 | db_sequence_open $TILE_SEQ, undef, "id", BDB::CREATE; |
|
|
231 | } |
145 | |
232 | |
146 | $id = 64 if $id < 64; |
233 | db_sequence_get $TILE_SEQ, undef, 1, my $id; |
147 | |
234 | |
148 | ++$id; |
235 | die "unable to allocate tile id: $!" |
149 | |
236 | if $!; |
150 | db_put $table, $txn, id => $id, 0; |
|
|
151 | db_txn_finish $txn; |
|
|
152 | |
|
|
153 | $SYNC->again unless $SYNC->is_active; |
|
|
154 | |
|
|
155 | return $cb->($id) unless $!; |
|
|
156 | |
|
|
157 | select undef, undef, undef, 0.01 * rand; |
|
|
158 | } |
237 | |
|
|
238 | db_put $table, undef, $name => $id, 0; |
|
|
239 | $cb->($id); |
159 | |
240 | |
160 | die "maximum number of transaction retries reached - database problems?"; |
|
|
161 | } |
241 | } |
162 | |
242 | |
163 | sub get_tile_id_sync($) { |
243 | sub get_tile_id_sync($) { |
164 | my ($name) = @_; |
244 | my ($name) = @_; |
165 | |
245 | |
166 | # fetch the full face table first |
|
|
167 | unless ($tilemap) { |
|
|
168 | do_table facemap => sub { |
|
|
169 | $tilemap = $_[0]; |
|
|
170 | delete $tilemap->{id}; |
|
|
171 | my %maptile = reverse %$tilemap;#d# |
|
|
172 | if ((scalar keys %$tilemap) != (scalar keys %maptile)) {#d# |
|
|
173 | $tilemap = { };#d# |
|
|
174 | CFPlus::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d# |
|
|
175 | }#d# |
|
|
176 | }; |
|
|
177 | BDB::flush; |
|
|
178 | } |
|
|
179 | |
|
|
180 | $tilemap->{$name} ||= do { |
246 | $facemap->{$name} ||= do { |
181 | my $id; |
247 | my $id; |
182 | do_get_tile_id $name, sub { |
248 | do_get_tile_id $name, sub { |
183 | $id = $_[0]; |
249 | $id = $_[0]; |
184 | }; |
250 | }; |
185 | BDB::flush; |
251 | BDB::flush; |
… | |
… | |
194 | "$DB_HOME/res-data-" . unpack "H*", $_[0] |
260 | "$DB_HOME/res-data-" . unpack "H*", $_[0] |
195 | } |
261 | } |
196 | |
262 | |
# Force a synchronous flush of the database server.
sub sync {
   # for debugging
   #DC::DB::Server::req (sync => sub { });
   DC::DB::Server::sync ();
}
202 | |
268 | |
# Asynchronously unlink a file via the DB server process ($path, $cb).
sub unlink($$) {
   DC::DB::Server::req (unlink => @_);
}
206 | |
272 | |
# Asynchronously read a file via the DB server process ($path, $cb).
sub read_file($$) {
   DC::DB::Server::req (read_file => @_);
}
210 | |
276 | |
# Asynchronously write a file via the DB server process ($path, $data, $cb).
sub write_file($$$) {
   DC::DB::Server::req (write_file => @_);
}
214 | |
280 | |
# Asynchronously prefetch a file via the DB server process.
sub prefetch_file($$$) {
   DC::DB::Server::req (prefetch_file => @_);
}
218 | |
284 | |
# Asynchronously append a timestamped line to a log file via the DB server.
sub logprint($$$) {
   DC::DB::Server::req (logprint => @_);
}
222 | |
288 | |
|
|
289 | ############################################################################# |
|
|
290 | |
223 | package CFPlus::DB::Server; |
291 | package DC::DB::Server; |
224 | |
292 | |
225 | use strict; |
293 | use strict; |
226 | |
294 | |
227 | use EV (); |
295 | use EV (); |
228 | use Fcntl; |
296 | use Fcntl; |
… | |
… | |
349 | |
417 | |
350 | print { $LOG_FH{$path} } "$ts $line\n" |
418 | print { $LOG_FH{$path} } "$ts $line\n" |
351 | } |
419 | } |
352 | |
420 | |
353 | sub run { |
421 | sub run { |
354 | ($FH, my $fh) = CFPlus::socketpipe; |
422 | ($FH, my $fh) = AnyEvent::Util::portable_socketpair |
|
|
423 | or die "unable to create database socketpair: $!"; |
355 | |
424 | |
356 | my $oldfh = select $FH; $| = 1; select $oldfh; |
425 | my $oldfh = select $FH; $| = 1; select $oldfh; |
357 | my $oldfh = select $fh; $| = 1; select $oldfh; |
426 | my $oldfh = select $fh; $| = 1; select $oldfh; |
358 | |
427 | |
359 | my $pid = fork; |
428 | my $pid = fork; |
360 | |
429 | |
361 | if (defined $pid && !$pid) { |
430 | if (defined $pid && !$pid) { |
362 | local $SIG{QUIT}; |
431 | local $SIG{QUIT} = "IGNORE"; |
363 | local $SIG{__DIE__}; |
432 | local $SIG{__DIE__}; |
364 | local $SIG{__WARN__}; |
433 | local $SIG{__WARN__}; |
365 | eval { |
434 | eval { |
366 | close $FH; |
435 | close $FH; |
367 | |
436 | |
… | |
… | |
373 | or die "unexpected eof while reading request"; |
442 | or die "unexpected eof while reading request"; |
374 | |
443 | |
375 | $req = Storable::thaw $req; |
444 | $req = Storable::thaw $req; |
376 | |
445 | |
377 | my ($id, $type, @args) = @$req; |
446 | my ($id, $type, @args) = @$req; |
378 | my $cb = CFPlus::DB::Server->can ("do_$type") |
447 | my $cb = DC::DB::Server->can ("do_$type") |
379 | or die "$type: unknown database request type\n"; |
448 | or die "$type: unknown database request type\n"; |
380 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
449 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
381 | (syswrite $fh, $res) == length $res |
450 | (syswrite $fh, $res) == length $res |
382 | or die "DB::write: $!"; |
451 | or die "DB::write: $!"; |
383 | } |
452 | } |
… | |
… | |
387 | |
456 | |
388 | eval { |
457 | eval { |
389 | Storable::store_fd [die => $error], $fh; |
458 | Storable::store_fd [die => $error], $fh; |
390 | }; |
459 | }; |
391 | |
460 | |
392 | warn $error; |
461 | warn $error |
|
|
462 | if $error; |
393 | |
463 | |
394 | CFPlus::_exit 0; |
464 | DC::_exit 0; |
395 | } |
465 | } |
396 | |
466 | |
397 | close $fh; |
467 | close $fh; |
398 | CFPlus::fh_nonblocking $FH, 1; |
468 | DC::fh_nonblocking $FH, 1; |
399 | |
469 | |
400 | $CB{die} = sub { die shift }; |
470 | $CB{die} = sub { die shift }; |
401 | |
471 | |
402 | $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read; |
472 | $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read; |
403 | $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write; |
473 | $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write; |
… | |
… | |
405 | |
475 | |
# Shut down the connection to the database server child process.
sub stop {
   close $FH;
}
409 | |
479 | |
|
|
480 | package DC::DB; |
|
|
481 | |
|
|
# Drop the current environment handles and delete the database directory,
# so a following try_open_db starts from scratch.
sub nuke_db {
   undef $DB_ENV;
   undef $DB_ENV_FH;

   # NOTE(review): mkpath before rmtree looks inverted, but rmtree is
   # wrapped in eval and open_db recreates the directory - confirm intent.
   File::Path::mkpath [$DB_HOME];
   eval { File::Path::rmtree $DB_HOME };
}
|
|
489 | |
|
|
# Open (or, on failure, nuke and re-open) the database environment, load
# the facemap into memory and install the event-loop watchers.
sub open_db {
   unless (eval { try_open_db }) {
      warn "$@";#d#
      eval { nuke_db };
      try_open_db;
   }

   # fetch the full face table first
   unless ($facemap) {
      do_table facemap => sub {
         $facemap = $_[0];
         delete $facemap->{id};
         my %maptile = reverse %$facemap;#d#
         if ((scalar keys %$facemap) != (scalar keys %maptile)) {#d#
            $facemap = { };#d#
            DC::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d#
         }#d#
      };
   }

   # feed bdb completions into the event loop
   $WATCHER = EV::io BDB::poll_fileno, EV::READ, \&BDB::poll_cb;

   # one-shot timer: checkpoint the environment a while after activity
   $SYNC = EV::timer_ns 0, 60, sub {
      $_[0]->stop;
      db_env_txn_checkpoint $DB_ENV, 0, 0, 0, sub { };
   };
}
|
|
516 | |
|
|
END {
   # flush pending transactions before tearing the environment down
   db_env_txn_checkpoint $DB_ENV, 0, 0, 0
      if $DB_ENV;

   # release handles in dependency order: sequence, tables, environment
   undef $TILE_SEQ;
   %DB_TABLE = ();
   undef $DB_ENV;
}
|
|
525 | |
410 | 1; |
526 | 1; |
411 | |
527 | |
=back

=head1 AUTHOR