1 |
=head1 NAME

CFPlus::DB - async. database and filesystem access for cfplus

=head1 SYNOPSIS

 use CFPlus::DB;

=head1 DESCRIPTION

=over 4

=cut
14 |
|
15 |
package CFPlus::DB;

use strict;
use utf8;

use Carp ();
use Storable ();
use Config;
use Event ();

use CFPlus;

# Per-version database home: keyed on the BerkeleyDB library version and
# the perl architecture so incompatible on-disk formats never get mixed.
# NOTE(review): $Crossfire::VARDIR is presumably provided by CFPlus - confirm.
# $BerkeleyDB::db_version is usable here because "use BerkeleyDB" (below, in
# CFPlus::DB::Server) happens at compile time, before this runtime assignment.
our $DB_HOME = "$Crossfire::VARDIR/cfplus-$BerkeleyDB::db_version-$Config{archname}";
28 |
|
29 |
sub path_of_res($) {
    # unpack "H*" misbehaves on upgraded (wide) strings, so force the
    # byte representation first - this intentionally modifies the caller's
    # scalar via the @_ alias.
    utf8::downgrade $_[0];

    # one file per resource, named after the hex dump of its binary key
    my $hex = unpack "H*", $_[0];

    "$DB_HOME/res-data-$hex"
}
33 |
|
34 |
sub sync {
    # Block until every outstanding database request has completed
    # (see CFPlus::DB::Server::sync).
    # for debugging
    #CFPlus::DB::Server::req (sync => sub { });
    CFPlus::DB::Server::sync ();
}
39 |
|
40 |
# Thin async wrappers: each queues a request for the database server
# process. The last element of @_ is always the completion callback,
# invoked with the result (see the matching do_* handlers below).

# does a key exist in a table? callback gets the value length or ()
sub exists($$$) {
    CFPlus::DB::Server::req (exists => @_);
}

# fetch a value from a table
sub get($$$) {
    CFPlus::DB::Server::req (get => @_);
}

# store a value in a table
sub put($$$$) {
    CFPlus::DB::Server::req (put => @_);
}

# delete a file (filesystem, not database)
sub unlink($$) {
    CFPlus::DB::Server::req (unlink => @_);
}

# slurp a file in raw mode
sub read_file($$) {
    CFPlus::DB::Server::req (read_file => @_);
}

# write data to a file in raw mode
sub write_file($$$) {
    CFPlus::DB::Server::req (write_file => @_);
}

# ask the OS to cache the first part of a file
sub prefetch_file($$$) {
    CFPlus::DB::Server::req (prefetch_file => @_);
}

# append a timestamped line to a logfile
sub logprint($$$) {
    CFPlus::DB::Server::req (logprint => @_);
}
71 |
|
72 |
# Cache of the "facemap" table: face hash => small numeric tile id.
our $tilemap;

# Synchronously map a face hash to its tile id, fetching the whole
# facemap once and allocating new ids on demand (blocks via sync).
sub get_tile_id_sync($) {
    my ($hash) = @_;

    # fetch the full face table first
    unless ($tilemap) {
        CFPlus::DB::Server::req (table => facemap => sub { $tilemap = $_[0] });
        sync;
    }

    # cache miss: ask the server to allocate/look up an id, then wait
    $tilemap->{$hash} ||= do {
        my $id;
        CFPlus::DB::Server::req (get_tile_id => $hash, sub { $id = $_[0] });
        sync;
        $id
    }
}
90 |
|
91 |
package CFPlus::DB::Server;

use strict;

use Fcntl;
use BerkeleyDB;

# State owned by the forked database process (see run/open_db).
our $DB_ENV;    # the BerkeleyDB environment handle
our $DB_STATE;
our %DB_TABLE;  # table name => open BerkeleyDB::Btree handle (see table)
101 |
|
102 |
sub open_db {
    # Create/open the BerkeleyDB environment in $DB_HOME; dies on
    # failure - run() catches that, wipes the directory and retries once.
    mkdir $DB_HOME, 0777;   # failure ignored - usually already exists
    # DB_REGISTER only exists from db 4.4 on, hence the string eval:
    # on older libraries the constant would not even compile
    my $recover = $BerkeleyDB::db_version >= 4.4
        ? eval "DB_REGISTER | DB_RECOVER"
        : 0;

    $DB_ENV = new BerkeleyDB::Env
        -Home => $DB_HOME,
        -Cachesize => 8_000_000,
        -ErrFile => "$DB_HOME/errorlog.txt",
#        -ErrPrefix => "DATABASE",
        -Verbose => 1,
        -Flags => DB_CREATE | DB_RECOVER | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_TXN | $recover,
        -SetFlags => DB_AUTO_COMMIT | DB_LOG_AUTOREMOVE,
        or die "unable to create/open database home $DB_HOME: $BerkeleyDB::Error";

    1
}
120 |
|
121 |
sub table($) {
    # Return (opening and caching on first use) the Btree handle for
    # the named table; the name is escaped to a safe filename first.
    $DB_TABLE{$_[0]} ||= do {
        my ($table) = @_;

        # escape everything outside [a-zA-Z0-9_-] as "=hex="
        $table =~ s/([^a-zA-Z0-9_\-])/sprintf "=%x=", ord $1/ge;

        new BerkeleyDB::Btree
            -Env => $DB_ENV,
            -Filename => $table,
#            -Filename => "database",
#            -Subname => $table,
            -Property => DB_CHKSUM,
            -Flags => DB_CREATE | DB_UPGRADE,
            or die "unable to create/open database table $_[0]: $BerkeleyDB::Error"
    }
}
137 |
|
138 |
our %CB;          # in-flight requests: id => completion callback
our $FH;          # parent's end of the socketpipe to the db process
our $ID = "aaa0"; # request id generator (perl magic string increment)
our ($fh_r_watcher, $fh_w_watcher); # Event I/O watchers on $FH
our $sync_timer;
our $write_buf;   # encoded requests not yet written to the child
our $read_buf;    # raw reply bytes read but not yet decoded

# Lazy checkpointing: an idle watcher that, 5..60s after being started
# by req(), queues a single "sync" request and parks itself again.
our $SYNC = Event->idle (min => 5, max => 60, parked => 1, cb => sub {
    CFPlus::DB::Server::req (sync => sub { });
    $_[0]->w->stop;
});
150 |
|
151 |
sub fh_write {
    # Flush as much of $write_buf to the database process as the
    # socket accepts; called from the write watcher and from sync().
    my $len = syswrite $FH, $write_buf;

    # syswrite returns undef on error (e.g. EWOULDBLOCK on the
    # nonblocking socket) - nothing was consumed then, so leave the
    # buffer alone instead of calling substr with an undef length
    substr $write_buf, 0, $len, ""
        if defined $len;

    # buffer drained - park the watcher until the next req()
    $fh_w_watcher->stop
        unless length $write_buf;
}
159 |
|
160 |
sub fh_read {
    # Drain the socket and dispatch every complete, length-prefixed,
    # Storable-encoded reply to its registered callback.
    my $status = sysread $FH, $read_buf, 16384, length $read_buf;

    # sysread returns 0 only on EOF (undef means error/would-block) -
    # the defined test must come FIRST, otherwise an undef $status is
    # compared numerically and the defined check is dead code
    die "FATAL: database process died\n"
        if defined $status && $status == 0;

    while () {
        return if 4 > length $read_buf;
        my $len = unpack "N", $read_buf;    # 4-octet big-endian length

        # wait for the rest of the frame
        return if $len + 4 > length $read_buf;

        substr $read_buf, 0, 4, "";
        my $res = Storable::thaw substr $read_buf, 0, $len, "";

        # reply is [id, result...]; callbacks are one-shot, so remove
        # the entry before invoking it
        my ($id, @args) = @$res;
        (delete $CB{$id})->(@args);
    }
}
179 |
|
180 |
sub sync {
    # Synchronously wait until all outstanding requests (except the
    # permanent "die" handler) are answered, running a private select
    # loop instead of re-entering the Event main loop.
    # biggest mess evarr
    my $fds; (vec $fds, fileno $FH, 1) = 1;

    # %CB always holds the "die" entry, hence "1 <"
    while (1 < scalar keys %CB) {
        my $r = $fds;
        my $w = length $write_buf ? $fds : undef;
        select $r, $w, undef, undef;

        # $w stays undef when nothing is buffered - guard before vec,
        # otherwise vec operates on an undefined bitmask
        fh_write if $w && vec $w, fileno $FH, 1;
        fh_read  if vec $r, fileno $FH, 1;
    }
}
193 |
|
194 |
sub req {
    # Queue an asynchronous request for the database process. The last
    # argument is always the completion callback; everything in between
    # is request-type specific and must survive Storable::freeze.
    my ($type, @args) = @_;
    my $cb = pop @args;

    my $id = ++$ID;   # magic string increment: unique per request
    $write_buf .= pack "N/a*", Storable::freeze [$id, $type, @args];
    $CB{$id} = $cb;

    $fh_w_watcher->start;  # flush when the socket becomes writable
    $SYNC->start;          # and schedule a lazy checkpoint
}
205 |
|
206 |
sub do_sync {
    # Flush the transaction log to disk (runs in the child process).
    $DB_ENV->txn_checkpoint (0, 0, 0);
    # no meaningful result - reply with an empty list
    ()
}
210 |
|
211 |
sub do_exists {
    # Check whether $key exists in the named table; returns the length
    # of the stored value on a hit, the empty list on a miss.
    my ($table_name, $key) = @_;

    utf8::downgrade $key;   # db keys are raw octets

    my $value;
    return () unless (table $table_name)->db_get ($key, $value) == 0;

    length $value
}
220 |
|
221 |
sub do_get {
    # Fetch the value stored under $key in the named table; returns the
    # value on a hit, the empty list on a miss.
    my ($table_name, $key) = @_;

    utf8::downgrade $key;   # db keys are raw octets

    my $value;
    my $status = (table $table_name)->db_get ($key, $value);

    $status == 0 ? $value : ()
}
230 |
|
231 |
sub do_put {
    # Store $data under $key in the named table; returns the
    # BerkeleyDB status (0 on success).
    my ($table_name, $key, $data) = @_;

    # BerkeleyDB stores raw octets - neither side may be a wide string
    utf8::downgrade $_
        for $key, $data;

    (table $table_name)->db_put ($key => $data)
}
238 |
|
239 |
sub do_table {
    # Return the entire named table as a hash reference
    # (key => value for every record).
    my ($name) = @_;

    my $handle = table $name;

    # walk the btree with a cursor, collecting every pair
    my %pairs;
    my $cursor = $handle->db_cursor;
    my ($key, $value);

    while ($cursor->c_get ($key, $value, BerkeleyDB::DB_NEXT) == 0) {
        $pairs{$key} = $value;
    }

    \%pairs
}
251 |
|
252 |
sub do_get_tile_id {
    # Map a face hash to its small numeric tile id, allocating the next
    # id inside a transaction when the hash is not yet known.
    my ($hash) = @_;

    my $id;
    my $table = table "facemap";

    # fast path: id already allocated for this hash
    return $id
        if $table->db_get ($hash, $id) == 0;

    # slow path: bump the "id" counter record and store the mapping,
    # retrying on transaction conflicts
    for (1..100) {
        my $txn = $DB_ENV->txn_begin;
        my $status = $table->db_get (id => $id);
        if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND) {
            # counter starts at 64, so the first allocated id is 65 -
            # NOTE(review): presumably low ids are reserved; confirm
            # against the tile cache users
            $id = ($id || 64) + 1;
            if ($table->db_put (id => $id) == 0
                && $table->db_put ($hash => $id) == 0) {
                $txn->txn_commit;

                return $id;
            }
        }
        $txn->txn_abort;
    }

    die "maximum number of transaction retries reached - database problems?";
}
278 |
|
279 |
sub do_unlink {
    # Delete a file from the filesystem; returns what unlink returns
    # (number of files removed).
    my ($path) = @_;

    # be consistent with the other file handlers (do_read_file etc.):
    # paths are byte strings, never wide strings
    utf8::downgrade $path;

    unlink $path;
}
282 |
|
283 |
sub do_read_file {
    # Slurp a file in raw (byte) mode; returns its contents, or the
    # empty list if it cannot be opened.
    my ($path) = @_;

    utf8::downgrade $path;
    open my $fh, "<:raw", $path
        or return;

    # a single sysread is not guaranteed to return the whole file -
    # keep reading until EOF (0) or error (undef)
    my $buf = "";
    1 while sysread $fh, $buf, 65536, length $buf;

    $buf
}
293 |
|
294 |
sub do_write_file {
    # Write $data to $path in raw mode. Returns 1 on success and the
    # empty list on any failure - previously a failed or short write
    # still reported success because neither syswrite nor close was
    # checked.
    my ($path, $data) = @_;

    utf8::downgrade $path;
    utf8::downgrade $data;
    open my $fh, ">:raw", $path
        or return;
    (syswrite $fh, $data) == length $data
        or return;
    # buffered write errors surface at close - check it, too
    close $fh
        or return;

    1
}
306 |
|
307 |
sub do_prefetch_file {
    # Read (and discard) the first $size octets of $path so the OS
    # caches them; returns 1 when the file could be opened, the empty
    # list otherwise.
    my ($path, $size) = @_;

    utf8::downgrade $path;

    open my $fh, "<:raw", $path
        or return;

    my $scratch;
    sysread $fh, $scratch, $size;

    1
}
317 |
|
318 |
# Cache of open append-mode logfile handles, keyed by path.
our %LOG_FH;

sub do_logprint {
    # Append a timestamped $line to the logfile at $path. A logfile
    # that cannot be opened is warned about and the line is dropped -
    # logging must never kill the database process.
    my ($path, $line) = @_;

    $LOG_FH{$path} ||= do {
        open my $fh, ">>:utf8", $path
            or warn "Couldn't open logfile $path: $!";

        # only touch the handle when the open succeeded - calling a
        # method on undef would die (this crashed on open failure before)
        $fh->autoflush (1)
            if $fh;

        $fh
    };

    # no handle - the warning above already fired, drop the line
    return unless $LOG_FH{$path};

    my ($sec, $min, $hour, $mday, $mon, $year) = localtime time;

    my $ts = sprintf "%04d-%02d-%02d %02d:%02d:%02d",
        $year + 1900, $mon + 1, $mday, $hour, $min, $sec;

    print { $LOG_FH{$path} } "$ts $line\n"
}
339 |
|
340 |
sub run {
    # Start the database server: fork a child that owns the BerkeleyDB
    # environment and serves requests over one end of a socketpipe,
    # while the parent drives the other end with nonblocking Event
    # watchers.
    ($FH, my $fh) = CFPlus::socketpipe;

    # autoflush both ends; the second assignment must NOT redeclare
    # $oldfh - "my $oldfh" twice in one scope triggers a "masks earlier
    # declaration" warning
    my $oldfh = select $FH; $| = 1; select $oldfh;
    $oldfh = select $fh; $| = 1; select $oldfh;

    my $pid = fork;

    if (defined $pid && !$pid) {
        # child: do not inherit the parent's handlers
        local $SIG{QUIT};
        local $SIG{__DIE__};
        local $SIG{__WARN__};
        eval {
            close $FH;

            # a corrupt database home is wiped and recreated once
            # NOTE(review): File::Path is not use'd in this file -
            # presumably loaded elsewhere; a missing rmtree only fails
            # inside the inner eval, confirm
            unless (eval { open_db }) {
                eval { File::Path::rmtree $DB_HOME };
                open_db;
            }

            # blocking request loop: 4-octet big-endian length prefix,
            # then a Storable-frozen [id, type, @args]
            while () {
                4 == read $fh, my $len, 4
                    or last;
                $len = unpack "N", $len;
                $len == read $fh, my $req, $len
                    or die "unexpected eof while reading request";

                $req = Storable::thaw $req;

                # dispatch to the matching do_* handler
                my ($id, $type, @args) = @$req;
                my $cb = CFPlus::DB::Server->can ("do_$type")
                    or die "$type: unknown database request type\n";
                my $res = pack "N/a*", Storable::freeze [$id, $cb->(@args)];
                (syswrite $fh, $res) == length $res
                    or die;
            }
        };

        my $error = $@;

        eval {
            # release database handles before reporting the error
            undef %DB_TABLE;
            undef $DB_ENV;

            # "die" is the parent's permanent error callback id
            Storable::store_fd [die => $error], $fh;
        };

        CFPlus::_exit 0;
    }

    # parent: keep only $FH, switch it to nonblocking I/O
    close $fh;
    CFPlus::fh_nonblocking $FH, 1;

    # a [die => $error] reply from the child re-throws in the parent
    $CB{die} = sub { die shift };

    $fh_r_watcher = Event->io (fd => $FH, poll => 'r', nice =>  1, cb => \&fh_read);
    $fh_w_watcher = Event->io (fd => $FH, poll => 'w', nice => -1, parked => 1, cb => \&fh_write);
    $SYNC->start;
}
399 |
|
400 |
sub stop {
    # Closing our end of the socketpipe makes the child's blocking
    # read loop in run() terminate, shutting the server down.
    close $FH;
}
403 |
|
404 |
1; |
405 |
|
406 |
=back

=head1 AUTHOR

 Marc Lehmann <schmorp@schmorp.de>
 http://home.schmorp.de/

=cut
414 |
|