=head1 NAME

DC::DB - async. database and filesystem access for deliantra

=head1 SYNOPSIS

   use DC::DB;

=head1 DESCRIPTION

=over 4

=cut
14 | |
14 | |
package DC::DB;

use common::sense;
use utf8;

use File::Path ();
use Carp ();
use Storable ();

use AnyEvent::Util ();
use Config;
use BDB;

use Fcntl ();

use DC;
26 | |
28 | |
# Old and new on-disk database directory names. The directory name embeds
# the BDB major.minor version and the perl archname, so incompatible
# environments never share a database.
our $ODBDIR = "cfplus-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";
our $DBDIR  = "client-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}";

# Full path of the database environment home.
our $DB_HOME = "$Deliantra::VARDIR/$DBDIR";
|
|
32 | |
|
|
# Lowest tile/face id handed out by the id allocator; smaller ids are reserved.
sub FIRST_TILE_ID() { 64 }
|
|
34 | |
|
|
# One-time migration: if the database home does not exist yet, try to
# move an old-style database directory into place, otherwise create a
# fresh directory.
unless (-d $DB_HOME) {
   if (-d "$Deliantra::VARDIR/$ODBDIR") {
      rename "$Deliantra::VARDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::VARDIR/$ODBDIR to $DB_HOME\n";
   } elsif (-d "$Deliantra::OLDDIR/$ODBDIR") {
      # bugfix: this used to rename "$Deliantra::OLDDIR/$DBDIR", which is
      # not the directory the -d test above just found, so the old
      # database was silently left behind. Rename the $ODBDIR path instead.
      rename "$Deliantra::OLDDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::OLDDIR/$ODBDIR to $DB_HOME\n";
   } else {
      File::Path::mkpath [$DB_HOME]
         or die "unable to create database directory $DB_HOME: $!";
   }
}
|
|
47 | |
|
|
# Keep BDB event processing short and strictly serialised so database
# work never stalls the client's event loop for long.
BDB::max_poll_time 0.03;
BDB::max_parallel 1;

our $DB_ENV;     # the (singleton) BDB environment handle
our $DB_ENV_FH;  # kept open on __db.register while the env lives (holds record locks)
our $DB_STATE;
our %DB_TABLE;   # cache of opened table handles, keyed by (escaped) table name
our $TILE_SEQ;   # BDB sequence used to allocate tile ids
32 | |
56 | |
|
|
# List the database files inside $DB_HOME, skipping BDB-internal entries
# (dotfiles, log.* files and __db.* region files). Returns the empty
# list when the directory cannot be read.
sub all_databases {
   opendir my $dirh, $DB_HOME
      or return;

   return grep { !/^(?:\.|log\.|_)/ } readdir $dirh;
}
|
|
63 | |
|
|
# Verify the databases of the given environment, but only when this is
# the sole process registered with it. Returns true on success; returns
# false (early) when a database fails verification, so the caller can
# nuke and recreate the environment.
sub try_verify_env($) {
   my ($env) = @_;

   # serialise verification attempts between client processes
   open my $lock_fh, "+>$DB_HOME/__lock"
      or die "__lock: $!";

   flock $lock_fh, &Fcntl::LOCK_EX
      or die "flock: $!";

   # we look at the __db.register env file that has been created by now
   # and check for the number of registered processes - if there is
   # only one, we verify all databases, otherwise we skip this.
   # we MUST NOT close the filehandle as long as we keep the env open, as
   # this destroys the record locks on it.
   open $DB_ENV_FH, "<$DB_HOME/__db.register"
      or die "__db.register: $!";

   # __db.register contains one record per process, with X signifying
   # empty records (of course, this is completely private to bdb...)
   my $live_records = grep /^[^X]/, <$DB_ENV_FH>;

   if ($live_records == 1) {
      # if any databases are corrupted, we simply delete all of them

      for (all_databases) {
         my $dbh = db_create $env
            or last;

         # a failed verify will panic the environment, which is fine with us
         db_verify $dbh, "$DB_HOME/$_";

         return if $!; # nuke database and recreate on verification failure
      }
   }

   # close probably cleans those up, but we also want to run on windows,
   # so better be safe.
   flock $lock_fh, &Fcntl::LOCK_UN
      or die "funlock: $!";

   1
}
|
|
107 | |
# Create, configure, open and verify the BDB environment in $DB_HOME.
# On success $DB_ENV is set and a true value is returned; on any failure
# this dies (the caller is expected to nuke the database and retry).
sub try_open_db {
   File::Path::mkpath [$DB_HOME];

   # drop any stale handles from a previous (failed) attempt
   undef $DB_ENV;
   undef $DB_ENV_FH;

   my $env = db_env_create;

   $env->set_errfile (\*STDERR);
   $env->set_msgfile (\*STDERR);
   $env->set_verbose (-1, 1);

   $env->set_flags (BDB::AUTO_COMMIT | BDB::REGION_INIT);
   # log auto-removal moved from set_flags to log_set_config in BDB 4.7
   $env->set_flags (&BDB::LOG_AUTOREMOVE ) if BDB::VERSION v0, v4.7;
   $env->log_set_config (&BDB::LOG_AUTO_REMOVE) if BDB::VERSION v4.7;

   # short timeouts - the client must never hang on the database
   $env->set_timeout (3, BDB::SET_TXN_TIMEOUT);
   $env->set_timeout (3, BDB::SET_LOCK_TIMEOUT);

   $env->set_cachesize (0, 2048 * 1024, 0);

   db_env_open $env, $DB_HOME,
      BDB::CREATE | BDB::REGISTER | BDB::RECOVER | BDB::INIT_MPOOL | BDB::INIT_LOCK | BDB::INIT_TXN,
      0666;

   $! and die "cannot open database environment $DB_HOME: " . BDB::strerror;

   # now we go through the registered processes, if there is only one, we verify all files
   # to make sure windows didn't corrupt them (as windows does....)
   try_verify_env $env
      or die "database environment failed verification";

   $DB_ENV = $env;

   1
}
53 | |
144 | |
54 | sub table($) { |
145 | sub table($) { |
55 | $DB_TABLE{$_[0]} ||= do { |
146 | $DB_TABLE{$_[0]} ||= do { |
56 | my ($table) = @_; |
147 | my ($table) = @_; |
57 | |
148 | |
58 | $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge; |
149 | $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge; |
59 | |
150 | |
|
|
151 | $DB_ENV#d# |
|
|
152 | or return ::clienterror ("trying to create table $_[0] with empty db_env $DB_ENV" => 1);#d# |
|
|
153 | |
60 | my $db = db_create $DB_ENV; |
154 | my $db = db_create $DB_ENV; |
61 | $db->set_flags (BDB::CHKSUM); |
155 | $db->set_flags (BDB::CHKSUM); |
62 | |
156 | |
63 | db_open $db, undef, $table, undef, BDB::BTREE, |
157 | db_open $db, undef, $table, undef, BDB::BTREE, |
64 | BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666; |
158 | BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666; |
… | |
… | |
69 | } |
163 | } |
70 | } |
164 | } |
71 | |
165 | |
72 | ############################################################################# |
166 | ############################################################################# |
73 | |
167 | |
our $WATCHER;  # EV watcher driving BDB::poll_cb, installed by open_db
our $SYNC;     # one-shot checkpoint timer, re-armed after writes

our $facemap;  # in-memory copy of the facemap table (name => tile id)
88 | |
171 | |
89 | sub exists($$$) { |
172 | sub exists($$$) { |
90 | my ($db, $key, $cb) = @_; |
173 | my ($db, $key, $cb) = @_; |
91 | |
174 | |
92 | my $data; |
175 | my $data; |
… | |
… | |
134 | my ($name, $cb) = @_; |
217 | my ($name, $cb) = @_; |
135 | |
218 | |
136 | my $table = table "facemap"; |
219 | my $table = table "facemap"; |
137 | my $id; |
220 | my $id; |
138 | |
221 | |
139 | db_get $table, undef, $name, $id, 0; |
222 | db_get $table, undef, $name => $id, 0; |
140 | return $cb->($id) unless $!; |
223 | $! or return $cb->($id); |
141 | |
224 | |
142 | for (1..100) { |
225 | unless ($TILE_SEQ) { |
143 | my $txn = $DB_ENV->txn_begin; |
226 | $TILE_SEQ = $table->sequence; |
144 | db_get $table, $txn, id => $id, 0; |
227 | $TILE_SEQ->initial_value (FIRST_TILE_ID); |
|
|
228 | $TILE_SEQ->set_cachesize (0); |
|
|
229 | db_sequence_open $TILE_SEQ, undef, "id", BDB::CREATE; |
|
|
230 | } |
145 | |
231 | |
146 | $id = 64 if $id < 64; |
232 | db_sequence_get $TILE_SEQ, undef, 1, my $id; |
147 | |
233 | |
148 | ++$id; |
234 | die "unable to allocate tile id: $!" |
149 | |
235 | if $!; |
150 | db_put $table, $txn, id => $id, 0; |
|
|
151 | db_txn_finish $txn; |
|
|
152 | |
|
|
153 | $SYNC->again unless $SYNC->is_active; |
|
|
154 | |
|
|
155 | return $cb->($id) unless $!; |
|
|
156 | |
|
|
157 | select undef, undef, undef, 0.01 * rand; |
|
|
158 | } |
236 | |
|
|
237 | db_put $table, undef, $name => $id, 0; |
|
|
238 | $cb->($id); |
159 | |
239 | |
160 | die "maximum number of transaction retries reached - database problems?"; |
|
|
161 | } |
240 | } |
162 | |
241 | |
163 | sub get_tile_id_sync($) { |
242 | sub get_tile_id_sync($) { |
164 | my ($name) = @_; |
243 | my ($name) = @_; |
165 | |
244 | |
166 | # fetch the full face table first |
|
|
167 | unless ($tilemap) { |
|
|
168 | do_table facemap => sub { |
|
|
169 | $tilemap = $_[0]; |
|
|
170 | delete $tilemap->{id}; |
|
|
171 | my %maptile = reverse %$tilemap;#d# |
|
|
172 | if ((scalar keys %$tilemap) != (scalar keys %maptile)) {#d# |
|
|
173 | $tilemap = { };#d# |
|
|
174 | CFPlus::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d# |
|
|
175 | }#d# |
|
|
176 | }; |
|
|
177 | BDB::flush; |
|
|
178 | } |
|
|
179 | |
|
|
180 | $tilemap->{$name} ||= do { |
245 | $facemap->{$name} ||= do { |
181 | my $id; |
246 | my $id; |
182 | do_get_tile_id $name, sub { |
247 | do_get_tile_id $name, sub { |
183 | $id = $_[0]; |
248 | $id = $_[0]; |
184 | }; |
249 | }; |
185 | BDB::flush; |
250 | BDB::flush; |
… | |
… | |
194 | "$DB_HOME/res-data-" . unpack "H*", $_[0] |
259 | "$DB_HOME/res-data-" . unpack "H*", $_[0] |
195 | } |
260 | } |
196 | |
261 | |
# Force the database server process to flush its state.
sub sync {
   # for debugging
   #DC::DB::Server::req (sync => sub { });
   DC::DB::Server::sync ();
}
202 | |
267 | |
# Asynchronously delete a file via the database server ($path, $cb).
# NOTE: shadows the unlink builtin inside this package - call with parens.
sub unlink($$) {
   DC::DB::Server::req ("unlink", @_);
}
206 | |
271 | |
# Asynchronously read a file via the database server ($path, $cb).
sub read_file($$) {
   DC::DB::Server::req ("read_file", @_);
}
210 | |
275 | |
# Asynchronously write a file via the database server ($path, $data, $cb).
sub write_file($$$) {
   DC::DB::Server::req ("write_file", @_);
}
214 | |
279 | |
# Ask the database server to prefetch a file into the OS cache.
sub prefetch_file($$$) {
   DC::DB::Server::req ("prefetch_file", @_);
}
218 | |
283 | |
# Append a timestamped line to a logfile, handled by the server process.
sub logprint($$$) {
   DC::DB::Server::req ("logprint", @_);
}
222 | |
287 | |
|
|
288 | ############################################################################# |
|
|
289 | |
223 | package CFPlus::DB::Server; |
290 | package DC::DB::Server; |
224 | |
291 | |
225 | use strict; |
292 | use common::sense; |
226 | |
293 | |
227 | use EV (); |
294 | use EV (); |
228 | use Fcntl; |
295 | use Fcntl; |
229 | |
296 | |
230 | our %CB; |
297 | our %CB; |
… | |
… | |
349 | |
416 | |
350 | print { $LOG_FH{$path} } "$ts $line\n" |
417 | print { $LOG_FH{$path} } "$ts $line\n" |
351 | } |
418 | } |
352 | |
419 | |
353 | sub run { |
420 | sub run { |
354 | ($FH, my $fh) = CFPlus::socketpipe; |
421 | ($FH, my $fh) = AnyEvent::Util::portable_socketpair |
|
|
422 | or die "unable to create database socketpair: $!"; |
355 | |
423 | |
356 | my $oldfh = select $FH; $| = 1; select $oldfh; |
424 | my $oldfh = select $FH; $| = 1; select $oldfh; |
357 | my $oldfh = select $fh; $| = 1; select $oldfh; |
425 | my $oldfh = select $fh; $| = 1; select $oldfh; |
358 | |
426 | |
359 | my $pid = fork; |
427 | my $pid = fork; |
360 | |
428 | |
361 | if (defined $pid && !$pid) { |
429 | if (defined $pid && !$pid) { |
362 | local $SIG{QUIT}; |
430 | local $SIG{QUIT} = "IGNORE"; |
363 | local $SIG{__DIE__}; |
431 | local $SIG{__DIE__}; |
364 | local $SIG{__WARN__}; |
432 | local $SIG{__WARN__}; |
365 | eval { |
433 | eval { |
366 | close $FH; |
434 | close $FH; |
367 | |
435 | |
… | |
… | |
373 | or die "unexpected eof while reading request"; |
441 | or die "unexpected eof while reading request"; |
374 | |
442 | |
375 | $req = Storable::thaw $req; |
443 | $req = Storable::thaw $req; |
376 | |
444 | |
377 | my ($id, $type, @args) = @$req; |
445 | my ($id, $type, @args) = @$req; |
378 | my $cb = CFPlus::DB::Server->can ("do_$type") |
446 | my $cb = DC::DB::Server->can ("do_$type") |
379 | or die "$type: unknown database request type\n"; |
447 | or die "$type: unknown database request type\n"; |
380 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
448 | my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)]; |
381 | (syswrite $fh, $res) == length $res |
449 | (syswrite $fh, $res) == length $res |
382 | or die "DB::write: $!"; |
450 | or die "DB::write: $!"; |
383 | } |
451 | } |
… | |
… | |
390 | }; |
458 | }; |
391 | |
459 | |
392 | warn $error |
460 | warn $error |
393 | if $error; |
461 | if $error; |
394 | |
462 | |
395 | CFPlus::_exit 0; |
463 | DC::_exit 0; |
396 | } |
464 | } |
397 | |
465 | |
398 | close $fh; |
466 | close $fh; |
399 | CFPlus::fh_nonblocking $FH, 1; |
467 | DC::fh_nonblocking $FH, 1; |
400 | |
468 | |
401 | $CB{die} = sub { die shift }; |
469 | $CB{die} = sub { die shift }; |
402 | |
470 | |
403 | $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read; |
471 | $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read; |
404 | $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write; |
472 | $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write; |
… | |
… | |
406 | |
474 | |
# Shut down the connection to the database server process by closing
# our end of the socketpair.
sub stop {
   close $FH;
}
410 | |
478 | |
|
|
package DC::DB;

# Throw away the whole database directory, e.g. after a failed
# environment open, so a fresh one can be created.
sub nuke_db {
   # release all environment handles before touching the files
   undef $DB_ENV;
   undef $DB_ENV_FH;

   # NOTE(review): mkpath before rmtree looks inverted - presumably it
   # ensures the path exists so rmtree cannot croak; confirm intent.
   File::Path::mkpath [$DB_HOME];
   eval { File::Path::rmtree $DB_HOME };
}
|
|
488 | |
|
|
# Open the database environment, nuking and recreating it once if the
# first attempt fails, then preload the facemap table and install the
# BDB poll watcher and checkpoint timer.
sub open_db {
   unless (eval { try_open_db }) {
      warn "$@";#d#
      eval { nuke_db };
      try_open_db;
   }

   # fetch the full face table first
   unless ($facemap) {
      do_table facemap => sub {
         $facemap = $_[0];
         delete $facemap->{id};
         # sanity check: the name => id map must be invertible
         my %name_by_id = reverse %$facemap;#d#
         if ((scalar keys %$facemap) != (scalar keys %name_by_id)) {#d#
            $facemap = { };#d#
            DC::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d#
         }#d#
      };
   }

   $WATCHER = EV::io BDB::poll_fileno, EV::READ, \&BDB::poll_cb;
   # checkpoint timer: started on demand, stops itself after one run
   $SYNC = EV::timer_ns 0, 60, sub {
      $_[0]->stop;
      db_env_txn_checkpoint $DB_ENV, 0, 0, 0, sub { };
   };
}
|
|
515 | |
|
|
END {
   # flush a final checkpoint so recovery at next start is cheap
   db_env_txn_checkpoint $DB_ENV, 0, 0, 0
      if $DB_ENV;

   # drop sequence and table handles before the environment itself
   undef $TILE_SEQ;
   %DB_TABLE = ();
   undef $DB_ENV;
}
|
|
524 | |
411 | 1; |
525 | 1; |
412 | |
526 | |
413 | =back |
527 | =back |
414 | |
528 | |
415 | =head1 AUTHOR |
529 | =head1 AUTHOR |