1 |
=head1 NAME |
2 |
|
3 |
DC::DB - async. database and filesystem access for deliantra |
4 |
|
5 |
=head1 SYNOPSIS |
6 |
|
7 |
use DC::DB; |
8 |
|
9 |
=head1 DESCRIPTION |
10 |
|
11 |
=over 4 |
12 |
|
13 |
=cut |
14 |
|
15 |
package DC::DB; |
16 |
|
17 |
use strict; |
18 |
use utf8; |
19 |
|
20 |
use File::Path (); |
21 |
use Carp (); |
22 |
use Storable (); |
23 |
use Config; |
24 |
use BDB; |
25 |
|
26 |
use DC; |
27 |
|
28 |
our $ODBDIR = "cfplus-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}"; |
29 |
our $DBDIR = "client-" . BDB::VERSION_MAJOR . "." . BDB::VERSION_MINOR . "-$Config{archname}"; |
30 |
our $DB_HOME = "$Deliantra::VARDIR/$DBDIR"; |
31 |
|
32 |
# Lowest tile id handed out by the tile-id sequence; smaller ids are
# reserved (see do_get_tile_id, which seeds the sequence with this value).
sub FIRST_TILE_ID () { 64 }
33 |
|
34 |
# One-time migration: if the per-BDB-version database directory does not
# exist yet, move an old database over from either the current or the
# legacy variable directory, otherwise create a fresh directory.
unless (-d $DB_HOME) {
   if (-d "$Deliantra::VARDIR/$ODBDIR") {
      rename "$Deliantra::VARDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::VARDIR/$ODBDIR to $DB_HOME\n";
   } elsif (-d "$Deliantra::OLDDIR/$ODBDIR") {
      # bugfix: this used to rename "$Deliantra::OLDDIR/$DBDIR" (the new-style
      # name) even though the -d test above checked for $ODBDIR, so the old
      # database was never actually moved.
      rename "$Deliantra::OLDDIR/$ODBDIR", $DB_HOME;
      print STDERR "INFO: moved old database from $Deliantra::OLDDIR/$ODBDIR to $DB_HOME\n";
   } else {
      File::Path::mkpath [$DB_HOME]
         or die "unable to create database directory $DB_HOME: $!";
   }
}
46 |
|
47 |
# Limit each BDB event-poll to 30ms and use a single worker thread, so
# database work cannot stall the client's event loop for long.
BDB::max_poll_time 0.03;
BDB::max_parallel 1;

our $DB_ENV;    # the opened BDB environment handle (set by try_open_db)
our $DB_STATE;
our %DB_TABLE;  # cache of opened table handles, keyed by (unescaped) table name
our $TILE_SEQ;  # BDB sequence used to allocate tile ids (see do_get_tile_id)
54 |
|
55 |
# Create (if necessary) and open the transactional BDB environment in
# $DB_HOME. Sets $DB_ENV and returns true on success, dies on failure.
sub try_open_db {
   File::Path::mkpath [$DB_HOME];

   my $env = db_env_create;

   $env->set_errfile (\*STDERR);
   $env->set_msgfile (\*STDERR);
   $env->set_verbose (-1, 1); # enable all verbose diagnostics

   $env->set_flags (BDB::AUTO_COMMIT | BDB::REGION_INIT);
   # automatic log-file removal: the flag moved from set_flags to
   # log_set_config in BDB 4.7, so call whichever this libdb supports
   $env->set_flags (&BDB::LOG_AUTOREMOVE ) if BDB::VERSION v0, v4.7;
   $env->log_set_config (&BDB::LOG_AUTO_REMOVE) if BDB::VERSION v4.7;

   # give up on stuck transactions/locks after three seconds
   $env->set_timeout (3, BDB::SET_TXN_TIMEOUT);
   $env->set_timeout (3, BDB::SET_LOCK_TIMEOUT);

   $env->set_cachesize (0, 2048 * 1024, 0); # 2MB cache

   # open with recovery; REGISTER lets multiple processes coordinate
   db_env_open $env, $DB_HOME,
      BDB::CREATE | BDB::REGISTER | BDB::RECOVER | BDB::INIT_MPOOL | BDB::INIT_LOCK | BDB::INIT_TXN,
      0666;

   $! and die "cannot open database environment $DB_HOME: " . BDB::strerror;

   $DB_ENV = $env;

   1
}
83 |
|
84 |
# table $name
# Return the (cached) BDB table handle for $name, opening/creating the
# underlying btree database on first use. Dies if the table cannot be
# opened.
sub table($) {
   $DB_TABLE{$_[0]} ||= do {
      my ($table) = @_;

      # escape characters that are unsafe in database file names
      $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge;

      $DB_ENV#d#
         or return ::clienterror ("trying to create table $_[0] with empty db_env $DB_ENV" => 1);#d#

      my $db = db_create $DB_ENV;
      $db->set_flags (BDB::CHKSUM);

      db_open $db, undef, $table, undef, BDB::BTREE,
              BDB::AUTO_COMMIT | BDB::CREATE | BDB::READ_UNCOMMITTED, 0666;

      # bugfix: this used to be '$! and "unable to..."' — the error string
      # was built in void context and thrown away, silently ignoring
      # open/create failures. Actually die now.
      $! and die "unable to open/create database table $_[0]: " . BDB::strerror;

      $db
   }
}
104 |
|
105 |
#############################################################################

our $WATCHER; # EV io watcher that feeds BDB::poll_cb
our $SYNC;    # EV timer that checkpoints the txn log after writes (armed by put)
our $facemap; # in-memory copy of the "facemap" table (face name => tile id)
110 |
|
111 |
# DC::DB::exists $table, $key, $cb
# Asynchronously test whether $key exists in $table. On success the
# callback receives the length of the stored data; on error/absence it
# is called without arguments.
sub exists($$$) {
   my ($db, $key, $cb) = @_;

   my $data;
   # $data is an output parameter of db_get — it is filled in by the
   # time the completion callback runs.
   db_get table $db, undef, $key, $data, 0, sub {
      $cb->($! ? () : length $data);
   };
}
119 |
|
120 |
# get $table, $key, $cb
# Asynchronously fetch the value stored under $key in $table; the
# callback receives the data on success, or no arguments on error.
sub get($$$) {
   my ($db, $key, $cb) = @_;

   my $data;
   # $data is filled in by db_get before the callback is invoked
   db_get table $db, undef, $key, $data, 0, sub {
      $cb->($! ? () : $data);
   };
}
128 |
|
129 |
# put $table, $key, $data, $cb
# Asynchronously store $data under $key in $table; the callback
# receives $! (false on success). Arms the $SYNC checkpoint timer after
# the write completes, unless it is already pending.
sub put($$$$) {
   my ($db, $key, $data, $cb) = @_;

   db_put table $db, undef, $key, $data, 0, sub {
      $cb->($!);
      $SYNC->again unless $SYNC->is_active;
   };
}
137 |
|
138 |
# do_table $table, $cb
# Read the ENTIRE contents of $table via a cursor and pass it to the
# callback as a { key => value } hash reference. The cursor loop runs
# synchronously; the loop ends on the first error (including the
# "no more entries" condition).
sub do_table {
   my ($db, $cb) = @_;

   $db = table $db;

   my $cursor = $db->cursor;
   my %kv;

   for (;;) {
      db_c_get $cursor, my $k, my $v, BDB::NEXT;
      last if $!;
      $kv{$k} = $v;
   }

   $cb->(\%kv);
}
154 |
|
155 |
# do_get_tile_id $name, $cb
# Look up (or allocate) the numeric tile id for face $name in the
# "facemap" table and pass it to the callback. Ids are allocated from a
# BDB sequence stored under the key "id", starting at FIRST_TILE_ID.
sub do_get_tile_id {
   my ($name, $cb) = @_;

   my $table = table "facemap";
   my $id;

   # fast path: synchronous lookup (no callback given to db_get)
   db_get $table, undef, $name => $id, 0;
   $! or return $cb->($id);

   # lazily open the allocation sequence on first miss
   unless ($TILE_SEQ) {
      $TILE_SEQ = $table->sequence;
      $TILE_SEQ->initial_value (FIRST_TILE_ID);
      $TILE_SEQ->set_cachesize (0); # presumably to avoid gaps from cached ids — confirm
      db_sequence_open $TILE_SEQ, undef, "id", BDB::CREATE;
   }

   # NOTE(review): this "my $id" shadows the one above — works, but
   # would trigger a "masks earlier declaration" warning under warnings
   db_sequence_get $TILE_SEQ, undef, 1, my $id;

   die "unable to allocate tile id: $!"
      if $!;

   # persist the new mapping, then report it
   db_put $table, undef, $name => $id, 0;
   $cb->($id);

}
180 |
|
181 |
# get_tile_id_sync $name
# Synchronous wrapper around do_get_tile_id: blocks in BDB::flush until
# the callback has delivered the id, caching results in $facemap.
sub get_tile_id_sync($) {
   my ($name) = @_;

   $facemap->{$name} ||= do {
      my $id;
      do_get_tile_id $name, sub {
         $id = $_[0];
      };
      # process outstanding BDB requests so the callback above has run
      BDB::flush;
      $id
   }
}
193 |
|
194 |
############################################################################# |
195 |
|
196 |
# path_of_res $name
# Map a resource name to its on-disk cache path inside $DB_HOME, using
# the hex-encoded name as the file name suffix.
sub path_of_res($) {
   utf8::downgrade $_[0]; # work around a bug in unpack "H*" on upgraded strings
   my $hex = unpack "H*", $_[0];
   "$DB_HOME/res-data-$hex"
}
200 |
|
201 |
# Block until the database helper process has answered all outstanding
# requests.
sub sync {
   # for debugging
   #DC::DB::Server::req (sync => sub { });
   DC::DB::Server::sync ();
}
206 |
|
207 |
# Thin wrappers that forward work to the external database/file-I/O
# helper process (see DC::DB::Server::req); the last argument is always
# the completion callback.

# unlink $path, $cb
sub unlink($$) {
   DC::DB::Server::req (unlink => @_);
}

# read_file $path, $cb->($data_or_undef)
sub read_file($$) {
   DC::DB::Server::req (read_file => @_);
}

# write_file $path, $data, $cb
sub write_file($$$) {
   DC::DB::Server::req (write_file => @_);
}

# prefetch_file $path, $size, $cb — warm the OS cache for $path
sub prefetch_file($$$) {
   DC::DB::Server::req (prefetch_file => @_);
}

# logprint $path, $line, $cb — append a timestamped line to a logfile
sub logprint($$$) {
   DC::DB::Server::req (logprint => @_);
}
226 |
|
227 |
############################################################################# |
228 |
|
229 |
package DC::DB::Server; |
230 |
|
231 |
use strict; |
232 |
|
233 |
use EV (); |
234 |
use Fcntl; |
235 |
|
236 |
our %CB;          # outstanding requests: id => completion callback
our $FH;          # parent's end of the socketpipe to the helper process
our $ID = "aaa0"; # request id generator (magic string auto-increment)
our ($fh_r_watcher, $fh_w_watcher); # EV io watchers on $FH
our $sync_timer;
our $write_buf;   # serialised requests not yet written to $FH
our $read_buf;    # partial response bytes read from $FH
243 |
|
244 |
# Flush as much of $write_buf to the helper process as the socket
# accepts; stop the write watcher once the buffer is drained.
sub fh_write {
   my $len = syswrite $FH, $write_buf;

   # robustness: on EAGAIN/error syswrite returns undef — previously
   # that undef was fed to substr; instead keep the watcher active and
   # retry on the next writability event
   defined $len
      or return;

   substr $write_buf, 0, $len, "";

   $fh_w_watcher->stop
      unless length $write_buf;
}
252 |
|
253 |
# Drain available data from the helper process into $read_buf and
# dispatch every complete length-prefixed Storable-encoded response to
# its registered callback.
sub fh_read {
   my $status = sysread $FH, $read_buf, 16384, length $read_buf;

   # sysread == 0 means EOF: the database process went away; undef
   # means EAGAIN/error on the nonblocking fh — just try again later.
   # (check "defined" first; the original order numified undef.)
   die "FATAL: database process died\n"
      if defined $status && $status == 0;

   while () {
      return if 4 > length $read_buf;        # need the length prefix
      my $len = unpack "N", $read_buf;

      return if $len + 4 > length $read_buf; # message not complete yet

      substr $read_buf, 0, 4, "";
      my $res = Storable::thaw substr $read_buf, 0, $len, "";

      # response is [id, @callback_args]; callbacks fire exactly once
      my ($id, @args) = @$res;
      (delete $CB{$id})->(@args);
   }
}
272 |
|
273 |
# Synchronously wait until all outstanding requests have been answered,
# using a private select loop (we cannot re-enter the EV loop here).
sub sync {
   # biggest mess evarr
   my $fds; (vec $fds, fileno $FH, 1) = 1;

   # %CB permanently contains the "die" handler installed by run(),
   # hence "1 <" rather than "0 <"
   while (1 < scalar keys %CB) {
      my $r = $fds;
      my $w = length $write_buf ? $fds : undef;
      select $r, $w, undef, undef;

      fh_write if vec $w, fileno $FH, 1;
      fh_read if vec $r, fileno $FH, 1;
   }
}
286 |
|
287 |
# req $type, @args, $cb
# Serialise a request for the helper process and queue it for writing;
# the trailing argument is the completion callback, invoked with the
# helper's response.
sub req {
   my $cb = pop;
   my ($type, @args) = @_;

   my $id = ++$ID;
   $CB{$id} = $cb;
   $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args];

   $fh_w_watcher->start;
}
297 |
|
298 |
# Helper-process handler: remove the given file; the result of unlink
# is passed through (errors are not fatal).
sub do_unlink {
   my ($path) = @_;

   unlink $path
}
301 |
|
302 |
# Helper-process handler: slurp a file in raw mode. Returns the file
# contents, or nothing if the file cannot be opened.
sub do_read_file {
   my ($path) = @_;

   utf8::downgrade $path; # paths are octet strings
   open my $fh, "<:raw", $path
      or return;

   # robustness: initialise the buffer, so a zero-length file yields ""
   # instead of undef (previously indistinguishable from open failure)
   my $buf = "";
   sysread $fh, $buf, -s $fh;

   $buf
}
312 |
|
313 |
# Helper-process handler: write $data to $path in raw mode. Returns
# true on success, nothing on failure.
sub do_write_file {
   my ($path, $data) = @_;

   utf8::downgrade $path; # paths and file data are octet strings
   utf8::downgrade $data;
   open my $fh, ">:raw", $path
      or return;
   # robustness: previously the results of syswrite and close were
   # ignored, so short/failed writes still reported success
   (syswrite $fh, $data) == length $data
      or return;
   close $fh
      or return; # buffered write errors surface at close

   1
}
325 |
|
326 |
# Helper-process handler: read the first $size bytes of $path and throw
# them away, purely to pull the data into the OS cache. Returns true if
# the file could be opened, nothing otherwise.
sub do_prefetch_file {
   my ($path, $size) = @_;

   utf8::downgrade $path;
   open my $fh, "<:raw", $path
      or return;
   my $discard;
   sysread $fh, $discard, $size;

   1
}
336 |
|
337 |
our %LOG_FH; # cache of open, autoflushed log filehandles, keyed by path

# Helper-process handler: append a timestamped $line to the logfile at
# $path. Filehandles are opened on first use and cached; if the file
# cannot be opened the message is dropped with a warning.
sub do_logprint {
   my ($path, $line) = @_;

   my $fh = $LOG_FH{$path} ||= do {
      if (open my $fh, ">>:utf8", $path) {
         $fh->autoflush (1);
         $fh
      } else {
         # bugfix: previously $fh->autoflush (1) was called
         # unconditionally, so a failed open warned and then died on
         # the undefined handle
         warn "Couldn't open logfile $path: $!";
         undef
      }
   };

   return unless $fh;

   my ($sec, $min, $hour, $mday, $mon, $year) = localtime time;

   my $ts = sprintf "%04d-%02d-%02d %02d:%02d:%02d",
               $year + 1900, $mon + 1, $mday, $hour, $min, $sec;

   print { $fh } "$ts $line\n"
}
358 |
|
359 |
# Fork the database/file-I/O helper process and set up the nonblocking
# request/response socketpipe plus its EV watchers in the parent.
sub run {
   ($FH, my $fh) = DC::socketpipe;

   # make both pipe ends autoflush (fix: this used two "my $oldfh"
   # declarations in the same scope, triggering a "masks earlier
   # declaration" warning — scope each select/restore pair)
   {
      my $oldfh = select $FH; $| = 1; select $oldfh;
   }
   {
      my $oldfh = select $fh; $| = 1; select $oldfh;
   }

   my $pid = fork;

   if (defined $pid && !$pid) {
      # child: serve length-prefixed Storable requests until EOF
      local $SIG{QUIT} = "IGNORE";
      local $SIG{__DIE__};
      local $SIG{__WARN__};
      eval {
         close $FH;

         while () {
            4 == read $fh, my $len, 4
               or last; # EOF — parent closed the pipe
            $len = unpack "N", $len;
            $len == read $fh, my $req, $len
               or die "unexpected eof while reading request";

            $req = Storable::thaw $req;

            # dispatch [id, type, @args] to the matching do_* handler
            my ($id, $type, @args) = @$req;
            my $cb = DC::DB::Server->can ("do_$type")
               or die "$type: unknown database request type\n";
            my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)];
            (syswrite $fh, $res) == length $res
               or die "DB::write: $!";
         }
      };

      my $error = $@;

      # best effort: report the error to the parent via the permanent
      # "die" callback before exiting
      eval {
         Storable::store_fd [die => $error], $fh;
      };

      warn $error
         if $error;

      DC::_exit 0;
   }

   # parent: close the child's end and go nonblocking
   close $fh;
   DC::fh_nonblocking $FH, 1;

   # permanent handler for fatal errors reported by the child
   $CB{die} = sub { die shift };

   $fh_r_watcher = EV::io $FH, EV::READ , \&fh_read;
   $fh_w_watcher = EV::io $FH, EV::WRITE, \&fh_write;
}
412 |
|
413 |
# Shut down the connection to the helper process; it exits when it
# reads EOF on its end of the pipe (see the "or last" in run).
sub stop {
   close $FH;
}
416 |
|
417 |
package DC::DB; |
418 |
|
419 |
# Delete the entire database directory (used when the environment
# cannot be opened). mkpath first so rmtree has an existing directory
# to remove; rmtree errors are deliberately ignored.
sub nuke_db {
   File::Path::mkpath [$DB_HOME];
   eval { File::Path::rmtree $DB_HOME };
}
423 |
|
424 |
# Open the database environment — wiping and recreating it once if the
# first attempt fails — then prime the in-memory $facemap from the
# "facemap" table and install the BDB poll watcher and checkpoint timer.
sub open_db {
   unless (eval { try_open_db }) {
      warn "$@";#d#
      eval { nuke_db };
      try_open_db;
   }

   # fetch the full face table first
   unless ($facemap) {
      do_table facemap => sub {
         $facemap = $_[0];
         delete $facemap->{id}; # the sequence record is not a face mapping
         # sanity check: the name=>id map must be invertible
         my %maptile = reverse %$facemap;#d#
         if ((scalar keys %$facemap) != (scalar keys %maptile)) {#d#
            $facemap = { };#d#
            DC::error "FATAL: facemap is not a 1:1 mapping, please report this and delete your $DB_HOME directory!\n";#d#
         }#d#
      };
   }

   $WATCHER = EV::io BDB::poll_fileno, EV::READ, \&BDB::poll_cb;
   # armed by put() via ->again: checkpoints the txn log 60s after a
   # write, then stops itself
   $SYNC = EV::timer_ns 0, 60, sub {
      $_[0]->stop;
      db_env_txn_checkpoint $DB_ENV, 0, 0, 0, sub { };
   };
}
450 |
|
451 |
# Flush everything to disk on program exit, then release the handles in
# dependency order: sequence, then tables, then the environment.
END {
   db_env_txn_checkpoint $DB_ENV, 0, 0, 0
      if $DB_ENV;

   undef $TILE_SEQ;
   %DB_TABLE = ();
   undef $DB_ENV;
}
459 |
|
460 |
1; |
461 |
|
462 |
=back |
463 |
|
464 |
=head1 AUTHOR |
465 |
|
466 |
Marc Lehmann <schmorp@schmorp.de> |
467 |
http://home.schmorp.de/ |
468 |
|
469 |
=cut |
470 |
|