1 package OpenILS::WWW::EGCatLoader;
2 use strict; use warnings;
3 use Apache2::Const -compile => qw(OK DECLINED FORBIDDEN HTTP_INTERNAL_SERVER_ERROR REDIRECT HTTP_BAD_REQUEST);
4 use OpenSRF::Utils::Logger qw/$logger/;
5 use OpenILS::Utils::CStoreEditor qw/:funcs/;
6 use OpenILS::Utils::Fieldmapper;
7 use OpenILS::Application::AppUtils;
# Shorthand for OpenILS::Application::AppUtils, used for simplereq() calls below.
10 my $U = 'OpenILS::Application::AppUtils';
# Added-content categories probed by added_content_stage1/2: table of
# contents, author notes, excerpt, summary, reviews.
12 our $ac_types = ['toc', 'anotes', 'excerpt', 'summary', 'reviews'];
# Body of load_record(): the main catalog record-detail page loader.
# NOTE(review): the sub header and several interior lines are elided in
# this numbered listing (original line numbers jump).  Gathers bib, copy,
# hold, serial and added-content data into $ctx for the record template,
# returning an Apache2 status constant.
20 $ctx->{page} = 'record';
22 $self->timelog("load_record() began");
# Record ID comes from the URL path; reject anything non-numeric early.
24 my $rec_id = $ctx->{page_args}->[0];
26 return Apache2::Const::HTTP_BAD_REQUEST
27 unless $rec_id and $rec_id =~ /^\d+$/;
# Kick off the non-blocking added-content HTTP probes now; results are
# harvested later by added_content_stage2().
29 $self->added_content_stage1($rec_id);
30 $self->timelog("past added content stage 1");
32 my $org = $self->_get_search_lib();
33 my $org_name = $ctx->{get_aou}->($org)->shortname;
34 my $pref_ou = $self->_get_pref_lib();
# Search depth defaults to the org unit type's depth when the CGI param
# is absent; 0 is a legal depth, hence the "defined" tests.
35 my $depth = $self->cgi->param('depth');
36 $depth = $ctx->{get_aou}->($org)->ou_type->depth
37 unless defined $depth; # can be 0
39 my $copy_depth = $self->cgi->param('copy_depth');
40 $copy_depth = $depth unless defined $copy_depth; # can be 0
41 $self->ctx->{copy_depth} = $copy_depth;
# Paging controls for the copy list shown on the page.
43 my $copy_limit = int($self->cgi->param('copy_limit') || 10);
44 my $copy_offset = int($self->cgi->param('copy_offset') || 0);
46 $self->get_staff_search_settings;
47 if ($ctx->{staff_saved_search_size}) {
48 $ctx->{saved_searches} = ($self->staff_load_searches)[1];
50 $self->timelog("past staff saved searches");
# NOTE(review): %kwargs is presumably unpacked in the elided sub header
# -- confirm against the full file.
52 $self->fetch_related_search_info($rec_id) unless $kwargs{no_search};
53 $self->timelog("past related search info");
55 # Check for user and load lists and prefs
56 if ($self->ctx->{user}) {
57 $self->_load_lists_and_settings;
58 $self->timelog("load user lists and settings");
61 # run copy retrieval in parallel to bib retrieval
# The cstore request is issued here but gather()ed only after the bib
# fetch below, so the two effectively overlap.
63 my $cstore = OpenSRF::AppSession->create('open-ils.cstore');
64 $cstore->session_locale($OpenILS::Utils::CStoreEditor::default_locale);
65 my $copy_rec = $cstore->request(
66 'open-ils.cstore.json_query.atomic',
67 $self->mk_copy_query($rec_id, $org, $copy_depth, $copy_limit, $copy_offset, $pref_ou)
70 # find foreign copy data
71 my $peer_rec = $U->simplereq(
73 'open-ils.search.peer_bibs', $rec_id );
75 $ctx->{foreign_copies} = $peer_rec;
77 my (undef, @rec_data) = $self->get_records_and_facets([$rec_id], undef, {
78 flesh => '{holdings_xml,bmp,mra,acp,acnp,acns}',
84 $self->timelog("past get_records_and_facets()");
85 $ctx->{bre_id} = $rec_data[0]->{id};
86 $ctx->{marc_xml} = $rec_data[0]->{marc_xml};
# Collect the result of the parallel copy retrieval started above.
88 $ctx->{copies} = $copy_rec->gather(1);
90 # Add public copy notes to each copy - and while we're in there, grab peer bib records
91 foreach my $copy (@{$ctx->{copies}}) {
92 $copy->{notes} = $U->simplereq(
94 'open-ils.circ.copy_note.retrieve.all',
95 {itemid => $copy->{id}, pub => 1 }
97 $copy->{peer_bibs} = $U->simplereq(
99 'open-ils.search.multi_home.bib_ids.by_barcode',
102 foreach my $bib (@{$copy->{peer_bibs}}) {
103 my (undef, @peer_data) = $self->get_records_and_facets(
105 flesh => '{holdings_xml,acp,acnp,acns,exclude_invisible_acn}',
110 #$copy->{peer_bib_marc} = $peer_data[0]->{marc_xml};
111 push @peer_marc,$peer_data[0]->{marc_xml};
113 $copy->{peer_bib_marc} = \@peer_marc;
116 $self->timelog("past store copy retrieval call");
117 $ctx->{copy_limit} = $copy_limit;
118 $ctx->{copy_offset} = $copy_offset;
# Flags consumed by the template; flipped below when serial data exists.
120 $ctx->{have_holdings_to_show} = 0;
121 $ctx->{have_mfhd_to_show} = 0;
123 $self->get_hold_copy_summary($rec_id, $org);
125 $self->timelog("past get_hold_copy_summary()");
# Direct cstore query: is this bib record flagged deleted?
126 $self->ctx->{bib_is_dead} = OpenILS::Application::AppUtils->is_true(
127 OpenILS::Utils::CStoreEditor->new->json_query({
128 select => { bre => [ 'deleted' ] },
130 where => { 'id' => $rec_id }
# Serial holdings: compressed summaries vs. raw MFHD, chosen by org
# setting.  NOTE(review): the surrounding conditional is partially
# elided in this listing.
137 $ctx->{get_org_setting}->
138 ($org, "opac.fully_compressed_serial_holdings")
140 # We're loading this data here? Are we therefore assuming that we
141 # *are* going to display something in the "issues" expandy?
142 $self->load_serial_holding_summaries($rec_id, $org, $copy_depth);
144 $ctx->{mfhd_summaries} =
145 $self->get_mfhd_summaries($rec_id, $org, $copy_depth);
147 if ($ctx->{mfhd_summaries} && scalar(@{$ctx->{mfhd_summaries}})
149 $ctx->{have_mfhd_to_show} = 1;
153 $self->timelog("past serials holding stuff");
157 $ctx->{marchtml} = $self->mk_marc_html($rec_id);
164 $self->prepare_browse_call_numbers();
# "expand" CGI params name page sections to pre-open.  NOTE(review):
# %expandies (section name => loader coderef, judging by usage) is
# defined in an elided span -- confirm against the full file.
168 my @expand = $self->cgi->param('expand');
169 if (grep {$_ eq 'all'} @expand) {
170 $ctx->{expand_all} = 1;
171 $expandies{$_}->() for keys %expandies;
174 for my $exp (@expand) {
175 $ctx->{"expand_$exp"} = 1;
176 $expandies{$exp}->() if exists $expandies{$exp};
180 $self->timelog("past expandies");
# Harvest the added-content probes started in stage 1.
182 $self->added_content_stage2($rec_id);
184 $self->timelog("past added content stage 2");
186 return Apache2::Const::OK;
189 # Collect IDs and info on the search that led to this details page.
190 # If no search query, etc is present, we leave ctx.search_result_index == -1
191 sub fetch_related_search_info {
# NOTE(review): the argument-unpacking line(s) are elided in this
# listing; $rec_id used below is presumably unpacked from @_ alongside
# $self -- confirm against the full file.
194 my $ctx = $self->ctx;
195 $ctx->{search_result_index} = -1;
# Re-run the originating search internally to recover the result ID list.
197 $self->load_rresults(internal => 1);
199 my @search_ids = @{$ctx->{ids}};
200 return unless @search_ids;
# Locate this record within the results so the template can link to the
# previous/next records from the same search.
202 for my $idx (0..$#search_ids) {
203 if ($search_ids[$idx] == $rec_id) {
204 $ctx->{prev_search_record} = $search_ids[$idx - 1] if $idx > 0;
205 $ctx->{next_search_record} = $search_ids[$idx + 1];
206 $ctx->{search_result_index} = $idx;
211 $ctx->{first_search_record} = $search_ids[0];
212 $ctx->{last_search_record} = $search_ids[-1];
# Body of the copy-list query builder (mk_copy_query, per the caller in
# load_record).  NOTE(review): the sub header and the $rec_id/$org/
# $pref_ou unpacking lines are elided in this listing.  Builds a cstore
# json_query for the record's copies, scoped and ranked by org unit.
221 my $copy_limit = shift;
222 my $copy_offset = shift;
225 my $query = $U->basic_opac_copy_query(
226 $rec_id, undef, undef, $copy_limit, $copy_offset, $self->ctx->{is_staff}
# Only join against the org-descendants function when the search org is
# not the root of the tree; at the root nothing would be filtered out.
229 if($org != $self->ctx->{aou_tree}->()->id) {
230 # no need to add the org join filter if we're not actually filtering
231 $query->{from}->{acp}->{aou} = {
239 transform => 'actor.org_unit_descendants',
240 result_field => 'id',
244 where => {id => $org}
251 # Unsure if we want these in the shared function, leaving here for now
# Primary sort: org-unit proximity relative to $org/$pref_ou ...
252 unshift(@{$query->{order_by}},
253 { class => "aou", field => 'id',
254 transform => 'evergreen.rank_ou', params => [$org, $pref_ou]
# ... final tie-break: copy status rank.
257 push(@{$query->{order_by}},
258 { class => "acp", field => 'status',
259 transform => 'evergreen.rank_cp_status'
# Body of mk_marc_html (sub header elided in this listing): render the
# record as HTML via the search service's XSLT-backed method.
267 my($self, $rec_id) = @_;
269 # could be optimized considerably by performing the xslt on the already fetched record
270 return $U->simplereq(
272 'open-ils.search.biblio.record.html', $rec_id, 1);
# Load the serial holdings summary tree for a bib record into ctx, and
# optionally expand one summary's individual holdings.  NOTE(review):
# several interior lines are elided in this numbered listing.
275 sub load_serial_holding_summaries {
276 my ($self, $rec_id, $org, $depth) = @_;
# Paging controls from CGI.  Note "||" treats 0 as absent.
278 my $limit = $self->cgi->param("slimit") || 10;
279 my $offset = $self->cgi->param("soffset") || 0;
# NOTE(review): indirect-object syntax; OpenSRF::AppSession->create(...)
# is the preferred spelling.
281 my $serial = create OpenSRF::AppSession("open-ils.serial");
283 # First, get the tree of /summaries/ of holdings.
284 my $tree = $serial->request(
285 "open-ils.serial.holding_summary_tree.by_bib",
286 $rec_id, $org, $depth, $limit, $offset
# Bail out (after logging the event) if the tree request failed.
289 return if $self->apache_log_if_event(
290 $tree, "getting holding summary tree for record $rec_id"
293 # Next, if requested, get a list of individual holdings under a
294 # particular summary.
296 my $summary_id = int($self->cgi->param("sid") || 0);
297 my $summary_type = $self->cgi->param("stype");
299 if ($summary_id and $summary_type) {
# NOTE(review): the trailing comma here chains the next "my" into one
# statement; it still executes both, but was likely meant to be ";".
300 my $expand_path = [ $self->cgi->param("sepath") ],
301 my $expand_limit = $self->cgi->param("selimit");
302 my $expand_offsets = [ $self->cgi->param("seoffset") ];
303 my $auto_expand_first = 0;
# No explicit offsets => ask the server to auto-expand the first group.
305 if (not @$expand_offsets) {
306 $expand_offsets = undef;
307 $auto_expand_first = 1;
310 $holdings = $serial->request(
311 "open-ils.serial.holdings.grouped_by_summary",
312 $summary_type, $summary_id,
313 $expand_path, $expand_limit, $expand_offsets,
# NOTE(review): staff contexts pass 2 instead of 1 here -- confirm the
# flag's meaning against the open-ils.serial API.
315 1 + ($self->ctx->{is_staff} ? 1 : 0)
318 if ($holdings and ref $holdings eq "ARRAY") {
# Graft the expanded holdings into their summary node within the tree.
319 $self->place_holdings_with_summary(
320 $tree, $holdings, $summary_id, $summary_type
321 ) or $self->apache->log->warn(
322 "could not place holdings within summary tree"
325 $self->apache_log_if_event(
326 $holdings, "getting holdings grouped by summary $summary_id"
333 # The presence of any keys in the tree hash other than 'more' means that we
334 # must have /something/ we could show.
335 $self->ctx->{have_holdings_to_show} = grep { $_ ne 'more' } (keys %$tree);
337 $self->ctx->{holding_summary_tree} = $tree;
340 # This helper to load_serial_holding_summaries() recursively searches in
341 # $tree for a holding summary matching $sid and $stype, and places $holdings
342 # within the node for that summary. IOW, this is about showing expanded
343 # holdings under their "parent" summary.
344 sub place_holdings_with_summary {
345 my ($self, $tree, $holdings, $sid, $stype) = @_;
# Check summaries attached to this node first.  NOTE(review): the
# success-return line after the match is elided in this listing.
347 foreach my $sum (@{$tree->{holding_summaries}}) {
348 if ($sum->{id} == $sid and $sum->{summary_type} eq $stype) {
349 $sum->{holdings} = $holdings;
# Otherwise recurse into child nodes, propagating any success upward.
354 foreach my $child (@{$tree->{children}}) {
355 return 1 if $self->place_holdings_with_summary(
356 $child, $holdings, $sid, $stype
# Fetch raw MFHD serial summaries for this bib from the search service.
# NOTE(review): the result-gathering/return lines are elided in this
# numbered listing.
363 sub get_mfhd_summaries {
364 my ($self, $rec_id, $org, $depth) = @_;
# NOTE(review): indirect-object syntax; ->create(...) is preferred.
366 my $serial = create OpenSRF::AppSession("open-ils.search");
367 my $result = $serial->request(
368 "open-ils.search.serial.record.bib.retrieve",
369 $rec_id, $org, $depth
# Return the call number label of the first fetched copy, if any copies
# were loaded into ctx.  NOTE(review): the $self unpacking and the
# fallback return for the no-copies case are elided in this listing.
376 sub any_call_number_label {
379 if ($self->ctx->{copies} and @{$self->ctx->{copies}}) {
380 return $self->ctx->{copies}->[0]->{call_number_label};
# Load a window of nearby call numbers for the shelf-browse sidebar.
# Uses the CGI "cn" param or falls back to this record's first call
# number label; bails out when neither is available.  NOTE(review):
# several interior lines are elided in this numbered listing.
386 sub prepare_browse_call_numbers {
389 my $cn = ($self->cgi->param("cn") || $self->any_call_number_label) or
392 my $org_unit = $self->ctx->{get_aou}->($self->_get_search_lib()) ||
393 $self->ctx->{aou_tree}->();
# NOTE(review): indirect-object syntax; ->create(...) is preferred.
395 my $supercat = create OpenSRF::AppSession("open-ils.supercat");
# Browse 9 entries around $cn, scoped to the org unit's shortname.
396 my $results = $supercat->request(
397 "open-ils.supercat.call_number.browse",
398 $cn, $org_unit->shortname, 9, $self->cgi->param("cnoffset")
# Parse each browse entry's MARC for the template.  NOTE(review): the
# surrounding map/list-construction lines are elided in this listing;
# "new XML::LibXML" is indirect-object syntax (prefer XML::LibXML->new).
403 $self->ctx->{browsed_call_numbers} = [
406 (new XML::LibXML)->parse_string($_->record->marc)
411 $self->ctx->{browsing_ou} = $org_unit;
# Populate ctx with this record's copy-count summary and hold count,
# scoping the hold count when org-unit hiding applies.  NOTE(review):
# some interior lines are elided in this numbered listing.
414 sub get_hold_copy_summary {
415 my ($self, $rec_id, $org) = @_;
416 my $ctx = $self->ctx;
418 my $search = OpenSRF::AppSession->create('open-ils.search');
419 my $copy_count_meth = 'open-ils.search.biblio.record.copy_count';
420 # We want to include OPAC-invisible copies in a staff context
421 if ($ctx->{is_staff}) {
422 $copy_count_meth .= '.staff';
# Fire the copy-count request now; its result is collected at the end
# so it overlaps with the synchronous hold-count call below.
424 my $req1 = $search->request($copy_count_meth, $org, $rec_id);
426 # if org unit hiding applies, limit the hold count to holds
427 # whose pickup library is within our depth-scoped tree
# Walk up the org tree while still inside hiding scope; the last such
# org becomes the pickup_lib_descendant filter.  NOTE(review):
# $count_args is declared in an elided line -- confirm.
429 while ($org and $ctx->{org_within_hiding_scope}->($org)) {
430 $count_args->{pickup_lib_descendant} = $org;
431 $org = $ctx->{get_aou}->($org)->parent_ou;
434 $self->ctx->{record_hold_count} = $U->simplereq(
435 'open-ils.circ', 'open-ils.circ.bre.holds.count',
436 $rec_id, $count_args);
# Collect the copy-count request issued earlier.
438 $self->ctx->{copy_summary} = $req1->recv->content;
# Handler for the printable-record page: stash the record ID and its
# print-formatted text in ctx.  NOTE(review): the $self unpacking line
# and closing brace are elided in this numbered listing.
443 sub load_print_record {
446 my $rec_id = $self->ctx->{page_args}->[0]
447 or return Apache2::Const::HTTP_BAD_REQUEST;
449 $self->{ctx}->{bre_id} = $rec_id;
450 $self->{ctx}->{printable_record} = $U->simplereq(
452 'open-ils.search.biblio.record.print', $rec_id);
454 return Apache2::Const::OK;
# Handler for emailing a record: the search service sends the email
# server-side, authorized by the caller's authtoken.  NOTE(review): the
# $self unpacking and the opening of the simplereq() call that takes the
# arguments below are elided in this numbered listing.
457 sub load_email_record {
460 my $rec_id = $self->ctx->{page_args}->[0]
461 or return Apache2::Const::HTTP_BAD_REQUEST;
463 $self->{ctx}->{bre_id} = $rec_id;
466 'open-ils.search.biblio.record.email',
467 $self->ctx->{authtoken}, $rec_id);
469 return Apache2::Const::OK;
472 # For each type, fire off the request to see if content is available.
473 # ctx.added_content.$type.status:
# Stage 1 of the added-content lookup: start one non-blocking HTTP
# request per content type against this host; added_content_stage2()
# harvests the results.  NOTE(review): several interior lines (status
# legend, error branches) are elided in this numbered listing.
477 sub added_content_stage1 {
480 my $ctx = $self->ctx;
481 my $sel_type = $self->cgi->param('ac') || '';
# Lookup key derived from the record (see get_ac_key); strip leading
# whitespace from the stored value.
482 my $key = $self->get_ac_key($rec_id);
483 ($key = $key->{value}) =~ s/^\s+//g if $key;
485 # Connect to this machine's IP address, using the same
486 # Host with which our caller used to connect to us.
487 # This avoids us having to route out of the cluster
488 # and back in to reach the top-level virtualhost.
489 my $ac_addr = $ENV{SERVER_ADDR};
490 my $ac_host = $self->apache->hostname;
493 $logger->info("tpac: added content connecting to $ac_addr / $ac_host");
495 $ctx->{added_content} = {};
496 for my $type (@$ac_types) {
# Status 3 when we have a key to look up (pending), 2 otherwise
# (treated as unavailable) -- see the status checks in stage 2.
498 $ctx->{added_content}->{$type} = {content => ''};
499 $ctx->{added_content}->{$type}->{status} = $key ? 3 : 2;
502 $logger->debug("tpac: starting added content request for $key => $type");
504 # Net::HTTP::NB is non-blocking /after/ the initial connect()
505 # Passing Timeout=>1 ensures we wait no longer than 1 second to
506 # connect to the local Evergreen instance (i.e. ourself).
507 # Connecting to oneself should either be very fast (normal)
508 # or very slow (routing problems).
510 my $req = Net::HTTP::NB->new(Host => $ac_addr, Timeout => 1);
# NOTE(review): the failure branch guarding this warn is elided in this
# listing; $@ implies an eval or constructor-failure check surrounds it.
512 $logger->warn("Unable to connect to $ac_addr / $ac_host".
513 " for added content lookup for $key: $@");
518 $req->host($self->apache->hostname);
# Only fetch the body (GET) for the type the user asked to view; a HEAD
# suffices to learn mere availability for the rest.
520 my $http_type = ($type eq $sel_type) ? 'GET' : 'HEAD';
521 $req->write_request($http_type => "/opac/extras/ac/$type/html/" . uri_escape_utf8($key));
522 $ctx->{added_content}->{$type}->{request} = $req;
527 # check each outstanding request. If it's ready, read the HTTP
528 # status and use it to determine if content is available. Otherwise,
529 # leave the status as unknown.
# NOTE(review): several interior lines (loop controls around the body
# read, closing braces) are elided in this numbered listing.
530 sub added_content_stage2 {
532 my $ctx = $self->ctx;
533 my $sel_type = $self->cgi->param('ac') || '';
535 for my $type (keys %{$ctx->{added_content}}) {
536 my $content = $ctx->{added_content}->{$type};
# Only poll requests still pending (status 3, set in stage 1).
538 if ($content->{status} == 3) {
539 $logger->debug("tpac: finishing added content request for $type");
541 my $req = $content->{request};
542 my $sel = IO::Select->new($req);
544 # if we are requesting a specific type of content, give the
545 # backend code a little extra time to retrieve the content.
546 my $wait = $type eq $sel_type ? 3 : 0; # TODO: config?
548 if ($sel->can_read($wait)) {
549 my ($code) = $req->read_response_headers;
# Status transitions here: 1 = content available, 2 = not available.
550 $content->{status} = $code eq '200' ? 1 : 2;
551 $logger->debug("tpac: added content request for $type returned $code");
# Slurp the response body only for the type actually being displayed.
553 if ($code eq '200' and $type eq $sel_type) {
556 my $n = $req->read_entity_body($buf, 1024);
558 $content->{content} .= $buf;
563 # To avoid a lot of hanging connections.
564 $content->{request}->shutdown(2) if ($content->{request});
568 # XXX this is copied directly from AddedContent.pm in
569 # working/user/jeff/ac_by_record_id_rebase. When Jeff's
570 # branch is merged and Evergreen gets added content
571 # lookup by ID, this can be removed.
572 # returns [{tag => $tag, value => $value}, {tag => $tag2, value => $value2}]
576 my $key_data = $self->editor->json_query({
577 select => {mfr => ['tag', 'value']},
599 grep {$_->{tag} eq '020'} @$key_data,
600 grep {$_->{tag} eq '024'} @$key_data