1 -- Upgrade Script for 2.3 to 2.4.0RC
-- psql variable used by every upgrade_deps_block_check() call below.
2 \set eg_version '''2.4.0RC'''
-- Record the upgrade itself before applying the individual patches.
4 INSERT INTO config.upgrade_log (version, applied_to) VALUES ('2.4.0RC', :eg_version);
5 -- remove the Bypass hold capture during clear shelf process setting
-- Patch 0739: aborts the transaction unless the DB is at the expected version.
7 SELECT evergreen.upgrade_deps_block_check('0739', :eg_version);
-- Purge stored values and audit-log rows before removing the setting type
-- itself (presumably FK-ordered: dependents first — verify constraints).
10 DELETE FROM actor.org_unit_setting WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
11 DELETE FROM config.org_unit_setting_type_log WHERE field_name = 'circ.holds.clear_shelf.no_capture_holds';
14 DELETE FROM config.org_unit_setting_type WHERE name = 'circ.holds.clear_shelf.no_capture_holds';
17 SELECT evergreen.upgrade_deps_block_check('0741', :eg_version);
-- Patch 0741: register the three custom-toolbar admin permissions
-- (org-unit-, workstation-, and user-scoped toolbars).
-- NOTE(review): the perm id values and row delimiters of this INSERT are not
-- visible in this excerpt; verify them against the full script / seed data.
19 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
21 'ADMIN_TOOLBAR_FOR_ORG',
24 'Allows a user to create, edit, and delete custom toolbars for org units',
30 'ADMIN_TOOLBAR_FOR_WORKSTATION',
33 'Allows a user to create, edit, and delete custom toolbars for workstations',
39 'ADMIN_TOOLBAR_FOR_USER',
42 'Allows a user to create, edit, and delete custom toolbars for users',
49 -- Evergreen DB patch 0743.schema.remove_tsearch2.sql
51 -- Enable native full-text search to be used, and drop TSearch2 extension
54 -- check whether patch can be applied
55 SELECT evergreen.upgrade_deps_block_check('0743', :eg_version);
57 -- FIXME: add/check SQL statements to perform the upgrade
58 -- First up, these functions depend on metabib.full_rec. They have to go for now.
-- flatten_marc(BIGINT) is recreated near the end of this patch; the TEXT
-- overload is intentionally not recreated (see the note before the recreation).
59 DROP FUNCTION IF EXISTS biblio.flatten_marc(bigint);
60 DROP FUNCTION IF EXISTS biblio.flatten_marc(text);
62 -- These views depend on metabib.full_rec as well. Bye-bye!
-- All three must go before metabib.full_rec can be dropped below;
-- simple_record and old_super_simple_record are recreated later in this patch.
63 DROP VIEW IF EXISTS reporter.old_super_simple_record;
64 DROP VIEW IF EXISTS reporter.simple_record;
65 DROP VIEW IF EXISTS reporter.classic_item_list;
67 \echo WARNING: The reporter.classic_item_list view was dropped if it existed.
68 \echo If you use that view, please run the example.reporter-extension.sql script
69 \echo to recreate it after the rest of the schema upgrade is complete.
71 -- Now we can drop metabib.full_rec.
-- It is recreated below as a view over metabib.real_full_rec using the
-- built-in tsvector type instead of the tsearch2 wrapper type.
72 DROP VIEW IF EXISTS metabib.full_rec;
74 -- These indexes have to go. BEFORE we alter the tables, otherwise things take extra time when we alter the tables.
-- Dropping first avoids index maintenance during the column-type changes
-- below; all six are recreated with identical definitions afterwards.
75 DROP INDEX metabib.metabib_author_field_entry_value_idx;
76 DROP INDEX metabib.metabib_identifier_field_entry_value_idx;
77 DROP INDEX metabib.metabib_keyword_field_entry_value_idx;
78 DROP INDEX metabib.metabib_series_field_entry_value_idx;
79 DROP INDEX metabib.metabib_subject_field_entry_value_idx;
80 DROP INDEX metabib.metabib_title_field_entry_value_idx;
82 -- Now grab all of the tsvector-enabled columns and switch them to the non-wrapper version of the type.
-- Each index_vector column is moved from the tsearch2 wrapper type to the
-- native pg_catalog.tsvector so that DROP EXTENSION tsearch2 can succeed below.
83 ALTER TABLE authority.full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
84 ALTER TABLE authority.simple_heading ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
85 ALTER TABLE metabib.real_full_rec ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
86 ALTER TABLE metabib.author_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
87 ALTER TABLE metabib.browse_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
88 ALTER TABLE metabib.identifier_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
89 ALTER TABLE metabib.keyword_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
90 ALTER TABLE metabib.series_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
91 ALTER TABLE metabib.subject_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
92 ALTER TABLE metabib.title_field_entry ALTER COLUMN index_vector TYPE pg_catalog.tsvector;
94 -- Halfway there! Goodbye tsearch2 extension!
-- No CASCADE: this will (intentionally) fail if anything still depends on
-- the extension's types, which the ALTERs above should have eliminated.
95 DROP EXTENSION tsearch2;
97 -- Next up, re-creating all of the stuff we just dropped.
99 -- Indexes! Note to whomever: Do we even need these anymore?
-- Partial indexes over the first 1024 chars of value, limited to rows whose
-- index_vector is empty — same definitions as the indexes dropped above.
100 CREATE INDEX metabib_author_field_entry_value_idx ON metabib.author_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
101 CREATE INDEX metabib_identifier_field_entry_value_idx ON metabib.identifier_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
102 CREATE INDEX metabib_keyword_field_entry_value_idx ON metabib.keyword_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
103 CREATE INDEX metabib_series_field_entry_value_idx ON metabib.series_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
104 CREATE INDEX metabib_subject_field_entry_value_idx ON metabib.subject_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
105 CREATE INDEX metabib_title_field_entry_value_idx ON metabib.title_field_entry (SUBSTRING(value,1,1024)) WHERE index_vector = ''::TSVECTOR;
107 -- metabib.full_rec, with insert/update/delete rules
-- Compatibility view over metabib.real_full_rec; value is truncated to
-- 1024 chars in the view. NOTE(review): several lines of the view and rule
-- definitions are not visible in this excerpt.
108 CREATE OR REPLACE VIEW metabib.full_rec AS
115 SUBSTRING(value,1,1024) AS value,
117 FROM metabib.real_full_rec;
-- INSERTs against the view are rewritten onto the backing table; a missing
-- id falls back to the view's id sequence.
119 CREATE OR REPLACE RULE metabib_full_rec_insert_rule
120 AS ON INSERT TO metabib.full_rec
122 INSERT INTO metabib.real_full_rec VALUES (
123 COALESCE(NEW.id, NEXTVAL('metabib.full_rec_id_seq'::REGCLASS)),
-- UPDATEs are likewise redirected column-by-column to real_full_rec.
133 CREATE OR REPLACE RULE metabib_full_rec_update_rule
134 AS ON UPDATE TO metabib.full_rec
136 UPDATE metabib.real_full_rec SET
142 subfield = NEW.subfield,
144 index_vector = NEW.index_vector
-- DELETEs are redirected by primary key.
147 CREATE OR REPLACE RULE metabib_full_rec_delete_rule
148 AS ON DELETE TO metabib.full_rec
150 DELETE FROM metabib.real_full_rec WHERE id = OLD.id;
152 -- reporter views that depended on metabib.full_rec are up next
-- One row per metarecord/source pair with commonly reported MARC fields
-- flattened into columns; repeated fields (ISBN/ISSN, subjects, URIs) are
-- aggregated into arrays. NOTE(review): the leading id columns of the
-- SELECT list are not visible in this excerpt.
153 CREATE OR REPLACE VIEW reporter.simple_record AS
160 title.value AS title,
161 uniform_title.value AS uniform_title,
162 author.value AS author,
163 publisher.value AS publisher,
-- pubdate: keep only the first run of digits from 260$c.
164 SUBSTRING(pubdate.value FROM $$\d+$$) AS pubdate,
165 series_title.value AS series_title,
166 series_statement.value AS series_statement,
167 summary.value AS summary,
-- ISBN: first whitespace-delimited token, hyphens stripped.
168 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
-- ISSN: normalized to "NNNN NNNN" (hyphen/space separator collapsed).
169 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn,
170 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '650' AND subfield = 'a' AND record = r.id)) AS topic_subject,
171 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '651' AND subfield = 'a' AND record = r.id)) AS geographic_subject,
172 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '655' AND subfield = 'a' AND record = r.id)) AS genre,
173 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '600' AND subfield = 'a' AND record = r.id)) AS name_subject,
174 ARRAY((SELECT DISTINCT value FROM metabib.full_rec WHERE tag = '610' AND subfield = 'a' AND record = r.id)) AS corporate_subject,
-- 856 URIs, with human-readable subfields (3, y) sorted before the URL (u).
175 ARRAY((SELECT value FROM metabib.full_rec WHERE tag = '856' AND subfield IN ('3','y','u') AND record = r.id ORDER BY CASE WHEN subfield IN ('3','y') THEN 0 ELSE 1 END)) AS external_uri
176 FROM biblio.record_entry r
177 JOIN metabib.metarecord_source_map s ON (s.source = r.id)
178 LEFT JOIN metabib.full_rec uniform_title ON (r.id = uniform_title.record AND uniform_title.tag = '240' AND uniform_title.subfield = 'a')
179 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
180 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag = '100' AND author.subfield = 'a')
181 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
182 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
183 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
184 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
185 LEFT JOIN metabib.full_rec series_title ON (r.id = series_title.record AND series_title.tag IN ('830','440') AND series_title.subfield = 'a')
186 LEFT JOIN metabib.full_rec series_statement ON (r.id = series_statement.record AND series_statement.tag = '490' AND series_statement.subfield = 'a')
187 LEFT JOIN metabib.full_rec summary ON (r.id = summary.record AND summary.tag = '520' AND summary.subfield = 'a')
-- NOTE(review): positional GROUP BY is fragile; consider naming the columns
-- if this view is ever touched again.
188 GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14;
-- Legacy flavor of the simple record view: one row per bib record, with
-- aggregated title/author/publisher/pubdate/ISBN/ISSN columns. Same ISBN/ISSN
-- normalization as reporter.simple_record above. NOTE(review): the leading
-- SELECT list and the trailing GROUP BY are not visible in this excerpt.
190 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
196 FIRST(title.value) AS title,
197 FIRST(author.value) AS author,
198 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
199 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
200 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
201 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
202 FROM biblio.record_entry r
203 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
204 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
205 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND publisher.tag = '260' AND publisher.subfield = 'b')
206 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND pubdate.tag = '260' AND pubdate.subfield = 'c')
207 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
208 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
211 -- And finally, the biblio functions. NOTE: I can't find the original source of the second one, so I skipped it as old cruft that was in our production DB.
-- Flatten one bib record's MARC into metabib.full_rec rows, delegating the
-- MARCXML parsing to vandelay.flatten_marc(bib.marc). Returns a set of rows.
-- NOTE(review): the DECLARE/BEGIN lines and the loop's RETURN NEXT / END are
-- not visible in this excerpt.
212 CREATE OR REPLACE FUNCTION biblio.flatten_marc ( rid BIGINT ) RETURNS SETOF metabib.full_rec AS $func$
214 bib biblio.record_entry%ROWTYPE;
215 output metabib.full_rec%ROWTYPE;
218 SELECT INTO bib * FROM biblio.record_entry WHERE id = rid;
-- Copy each flattened field into the output row shape, stamping the record id.
220 FOR field IN SELECT * FROM vandelay.flatten_marc( bib.marc ) LOOP
221 output.record := rid;
222 output.ind1 := field.ind1;
223 output.ind2 := field.ind2;
224 output.tag := field.tag;
225 output.subfield := field.subfield;
226 output.value := field.value;
231 $func$ LANGUAGE PLPGSQL;
233 -- Evergreen DB patch 0745.data.prewarn_expire_setting.sql
235 -- Configuration setting to warn staff when an account is about to expire
238 -- check whether patch can be applied
239 SELECT evergreen.upgrade_deps_block_check('0745', :eg_version);
-- NOTE(review): the setting name repeats below, presumably inside
-- oils_i18n_gettext(...) wrappers for label and description; those wrapper
-- lines are not visible in this excerpt.
241 INSERT INTO config.org_unit_setting_type
242 (name, grp, label, description, datatype)
244 'circ.patron_expires_soon_warning',
247 'circ.patron_expires_soon_warning',
248 'Warn when patron account is about to expire',
253 'circ.patron_expires_soon_warning',
254 'Warn when patron account is about to expire. If set, the staff client displays a warning this many days before the expiry of a patron account. Value is in number of days, for example: 3 for 3 days.',
261 -- LP1076399: Prevent reactivated holds from canceling immediately.
262 -- Set the expire_time to NULL on all frozen/suspended holds.
264 SELECT evergreen.upgrade_deps_block_check('0747', :eg_version);
266 UPDATE action.hold_request
267 SET expire_time = NULL
-- NOTE(review): the WHERE clause restricting this UPDATE to frozen holds is
-- not visible in this excerpt; confirm it exists in the full script — an
-- unfiltered UPDATE would clear expire_time on every hold.
271 SELECT evergreen.upgrade_deps_block_check('0752', :eg_version);
-- Patch 0752: Link Checker (URL verification) schema and supporting data.
-- The queue of records to check is an ordinary bib record bucket.
273 INSERT INTO container.biblio_record_entry_bucket_type (code, label) VALUES ('url_verify', 'URL Verification Queue');
-- Rebuild the whole schema from scratch; CASCADE clears any leftovers from a
-- previous partial run.
275 DROP SCHEMA IF EXISTS url_verify CASCADE;
277 CREATE SCHEMA url_verify;
-- A verification session: a named batch (one bucket) of records to check.
-- NOTE(review): the name column is not visible in this excerpt, but the
-- UNIQUE constraint below references it.
279 CREATE TABLE url_verify.session (
280 id SERIAL PRIMARY KEY,
282 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
283 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
284 container INT NOT NULL REFERENCES container.biblio_record_entry_bucket (id) DEFERRABLE INITIALLY DEFERRED,
285 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
286 search TEXT NOT NULL,
287 CONSTRAINT uvs_name_once_per_lib UNIQUE (name, owning_lib)
-- Per-session XPath expressions used to pull URLs out of MARCXML.
-- NOTE(review): the xpath column itself is not visible in this excerpt, but
-- the UNIQUE constraint below references it.
290 CREATE TABLE url_verify.url_selector (
291 id SERIAL PRIMARY KEY,
293 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
294 CONSTRAINT tag_once_per_sess UNIQUE (xpath, session)
-- One extracted (or redirect-discovered) URL. A row either points back at
-- the URL it was redirected from, or carries full provenance (bucket item,
-- selector, tag/subfield) — enforced by the CHECK constraint at the bottom.
297 CREATE TABLE url_verify.url (
298 id SERIAL PRIMARY KEY,
299 redirect_from INT REFERENCES url_verify.url(id) DEFERRABLE INITIALLY DEFERRED,
300 item INT REFERENCES container.biblio_record_entry_bucket_item (id) DEFERRABLE INITIALLY DEFERRED,
301 url_selector INT REFERENCES url_verify.url_selector (id) DEFERRABLE INITIALLY DEFERRED,
302 session INT REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
306 full_url TEXT NOT NULL,
318 CONSTRAINT redirect_or_from_item CHECK (
319 redirect_from IS NOT NULL OR (
321 url_selector IS NOT NULL AND
323 subfield IS NOT NULL AND
-- One batch run of checks over a session, with who ran it and when.
329 CREATE TABLE url_verify.verification_attempt (
330 id SERIAL PRIMARY KEY,
331 usr INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
332 session INT NOT NULL REFERENCES url_verify.session (id) DEFERRABLE INITIALLY DEFERRED,
333 start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
334 finish_time TIMESTAMP WITH TIME ZONE
-- The result of testing one URL during one attempt: request/response times,
-- response code, and the follow-up URL row when the server redirected.
337 CREATE TABLE url_verify.url_verification (
338 id SERIAL PRIMARY KEY,
339 url INT NOT NULL REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED,
340 attempt INT NOT NULL REFERENCES url_verify.verification_attempt (id) DEFERRABLE INITIALLY DEFERRED,
341 req_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
342 res_time TIMESTAMP WITH TIME ZONE,
343 res_code INT CHECK (res_code BETWEEN 100 AND 999), -- we know > 599 will never be valid HTTP code, but we use 9XX for other stuff
345 redirect_to INT REFERENCES url_verify.url (id) DEFERRABLE INITIALLY DEFERRED -- if redirected
-- Registry of staff interfaces that support saved filter sets.
348 CREATE TABLE config.filter_dialog_interface (
349 key TEXT PRIMARY KEY,
-- A named, per-library set of saved filters (JSON blob) for one interface.
-- NOTE(review): the name column is not visible in this excerpt, but the
-- UNIQUE constraint below references it.
353 CREATE TABLE config.filter_dialog_filter_set (
354 id SERIAL PRIMARY KEY,
356 owning_lib INT NOT NULL REFERENCES actor.org_unit (id) DEFERRABLE INITIALLY DEFERRED,
357 creator INT NOT NULL REFERENCES actor.usr (id) DEFERRABLE INITIALLY DEFERRED,
358 create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
359 interface TEXT NOT NULL REFERENCES config.filter_dialog_interface (key) DEFERRABLE INITIALLY DEFERRED,
360 filters TEXT NOT NULL CHECK (is_json(filters)),
361 CONSTRAINT cfdfs_name_once_per_lib UNIQUE (name, owning_lib)
365 SELECT evergreen.upgrade_deps_block_check('0753', :eg_version);
-- Patch 0753: URL parsing/ingest machinery. parse_url() splits a URL string
-- into a url_verify.url composite using Rose::URI (PL/Perl).
-- NOTE(review): the function's closing lines (return and LANGUAGE clause)
-- are not visible in this excerpt.
367 CREATE OR REPLACE FUNCTION url_verify.parse_url (url_in TEXT) RETURNS url_verify.url AS $$
372 my $url = Rose::URI->new($url_in);
374 my %parts = map { $_ => $url->$_ } qw/scheme username password host port path query fragment/;
376 $parts{full_url} = $url_in;
# Derived fields: domain = host minus its first label; tld = last label of
# the domain; page = last component of the path.
377 ($parts{domain} = $parts{host}) =~ s/^[^.]+\.//;
378 ($parts{tld} = $parts{domain}) =~ s/(?:[^.]+\.)+//;
379 ($parts{page} = $parts{path}) =~ s#(?:[^/]*/)+##;
-- BEFORE INSERT trigger body: parse NEW.full_url and populate the derived
-- component columns on the row being inserted. The ingest_url_tgr trigger
-- wiring it to url_verify.url follows the function.
-- NOTE(review): the BEGIN and RETURN NEW/END lines are not visible here.
385 CREATE OR REPLACE FUNCTION url_verify.ingest_url () RETURNS TRIGGER AS $$
387 tmp_row url_verify.url%ROWTYPE;
389 SELECT * INTO tmp_row FROM url_verify.parse_url(NEW.full_url);
-- Copy every parsed component onto the incoming row.
391 NEW.scheme := tmp_row.scheme;
392 NEW.username := tmp_row.username;
393 NEW.password := tmp_row.password;
394 NEW.host := tmp_row.host;
395 NEW.domain := tmp_row.domain;
396 NEW.tld := tmp_row.tld;
397 NEW.port := tmp_row.port;
398 NEW.path := tmp_row.path;
399 NEW.page := tmp_row.page;
400 NEW.query := tmp_row.query;
401 NEW.fragment := tmp_row.fragment;
407 CREATE TRIGGER ingest_url_tgr
408 BEFORE INSERT ON url_verify.url
409 FOR EACH ROW EXECUTE PROCEDURE url_verify.ingest_url();
-- Walk every selector of a session over one bucket item's MARCXML, pulling
-- URL values (plus their owning tag and subfield code) by XPath position
-- until no more match, and insert each as a url_verify.url row. Returns the
-- number of URLs extracted. NOTE(review): DECLARE/BEGIN, inner-loop framing,
-- and END lines are not visible in this excerpt.
411 CREATE OR REPLACE FUNCTION url_verify.extract_urls ( session_id INT, item_id INT ) RETURNS INT AS $$
419 current_selector url_verify.url_selector%ROWTYPE;
423 FOR current_selector IN SELECT * FROM url_verify.url_selector s WHERE s.session = session_id LOOP
424 current_url_pos := 1;
-- Nth text node matched by this selector's xpath in the record's MARC.
426 SELECT (XPATH(current_selector.xpath || '/text()', b.marc::XML))[current_url_pos]::TEXT INTO current_url
427 FROM biblio.record_entry b
428 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
429 WHERE c.id = item_id;
431 EXIT WHEN current_url IS NULL;
-- Owning datafield tag (parent element's @tag) for the same position.
433 SELECT (XPATH(current_selector.xpath || '/../@tag', b.marc::XML))[current_url_pos]::TEXT INTO current_tag
434 FROM biblio.record_entry b
435 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
436 WHERE c.id = item_id;
-- XPath can miss @tag for repeated subfields; reuse the last tag we saw.
438 IF current_tag IS NULL THEN
439 current_tag := last_seen_tag;
441 last_seen_tag := current_tag;
444 SELECT (XPATH(current_selector.xpath || '/@code', b.marc::XML))[current_url_pos]::TEXT INTO current_sf
445 FROM biblio.record_entry b
446 JOIN container.biblio_record_entry_bucket_item c ON (c.target_biblio_record_entry = b.id)
447 WHERE c.id = item_id;
449 INSERT INTO url_verify.url (session, item, url_selector, tag, subfield, ord, full_url)
450 VALUES ( session_id, item_id, current_selector.id, current_tag, current_sf, current_ord, current_url);
452 current_url_pos := current_url_pos + 1;
453 current_ord := current_ord + 1;
-- current_ord started at 1, so the count of inserted URLs is ord - 1.
457 RETURN current_ord - 1;
463 -- NOTE: beware the use of bare perm IDs in the update_perm's below and in
464 -- the 950 seed data file. Update before merge to match current perm IDs! XXX
467 SELECT evergreen.upgrade_deps_block_check('0754', :eg_version);
-- Patch 0754: permissions for the Link Checker and saved filter dialogs.
-- NOTE(review): 'ULSs' in the first description looks like a typo for 'URLs'
-- — confirm against the 950 seed data before changing the string.
469 INSERT INTO permission.perm_list (id, code, description)
475 'Allows a user to process and verify ULSs',
482 INSERT INTO permission.perm_list (id, code, description)
485 'URL_VERIFY_UPDATE_SETTINGS',
488 'Allows a user to configure URL verification org unit settings',
495 INSERT INTO permission.perm_list (id, code, description)
498 'SAVED_FILTER_DIALOG_FILTERS',
501 'Allows users to save and load sets of filters for filter dialogs, available in certain staff interfaces',
-- Settings group plus four org-unit settings controlling URL verification
-- batch runs (delay between tests, redirect limit, per-URL timeout, batch
-- size). NOTE(review): each setting name repeats below, presumably inside
-- oils_i18n_gettext(...) wrappers for label and description; the wrapper and
-- datatype/update_perm value lines are not visible in this excerpt.
508 INSERT INTO config.settings_group (name, label)
519 INSERT INTO config.org_unit_setting_type
520 (name, grp, label, description, datatype, update_perm)
522 'url_verify.url_verification_delay',
525 'url_verify.url_verification_delay',
526 'Number of seconds to wait between URL test attempts.',
531 'url_verify.url_verification_delay',
532 'Throttling mechanism for batch URL verification runs. Each running process will wait this number of seconds after a URL test before performing the next.',
540 INSERT INTO config.org_unit_setting_type
541 (name, grp, label, description, datatype, update_perm)
543 'url_verify.url_verification_max_redirects',
546 'url_verify.url_verification_max_redirects',
547 'Maximum redirect lookups',
552 'url_verify.url_verification_max_redirects',
553 'For URLs returning 3XX redirects, this is the maximum number of redirects we will follow before giving up.',
561 INSERT INTO config.org_unit_setting_type
562 (name, grp, label, description, datatype, update_perm)
564 'url_verify.url_verification_max_wait',
567 'url_verify.url_verification_max_wait',
568 'Maximum wait time (in seconds) for a URL to lookup',
573 'url_verify.url_verification_max_wait',
574 'If we exceed the wait time, the URL is marked as a "timeout" and the system moves on to the next URL',
583 INSERT INTO config.org_unit_setting_type
584 (name, grp, label, description, datatype, update_perm)
586 'url_verify.verification_batch_size',
589 'url_verify.verification_batch_size',
590 'Number of URLs to test in parallel',
595 'url_verify.verification_batch_size',
596 'URLs are tested in batches. This number defines the size of each batch and it directly relates to the number of back-end processes performing URL verification.',
-- Register the Link Checker with the filter-dialog system, and add the two
-- per-user saved-grid-column settings for its staff interfaces.
-- NOTE(review): the key values and remaining column values of these INSERTs
-- are not visible in this excerpt.
605 INSERT INTO config.filter_dialog_interface (key, description) VALUES (
609 'All Link Checker filter dialogs',
616 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
617 'ui.grid_columns.url_verify.select_urls',
621 'ui.grid_columns.url_verify.select_urls',
622 'Link Checker''s URL Selection interface''s saved columns',
627 'ui.grid_columns.url_verify.select_urls',
628 'Link Checker''s URL Selection interface''s saved columns',
635 INSERT INTO config.usr_setting_type (name,grp,opac_visible,label,description,datatype) VALUES (
636 'ui.grid_columns.url_verify.review_attempt',
640 'ui.grid_columns.url_verify.review_attempt',
641 'Link Checker''s Review Attempt interface''s saved columns',
646 'ui.grid_columns.url_verify.review_attempt',
647 'Link Checker''s Review Attempt interface''s saved columns',
657 SELECT evergreen.upgrade_deps_block_check('0755', :eg_version);
-- Patch 0755: default-value settings for the ACQ file upload interface —
-- PO creation/activation, provider, and the Vandelay import knobs (match
-- set, merge profiles, quality ratio, item loading).
-- NOTE(review): each setting name repeats below, presumably inside
-- oils_i18n_gettext(...) wrappers for label and description; the grp,
-- datatype, and fm_class value lines are not visible in this excerpt.
659 INSERT INTO config.org_unit_setting_type
660 (name, label, description, grp, datatype, fm_class) VALUES
662 'acq.upload.default.create_po',
664 'acq.upload.default.create_po',
670 'acq.upload.default.create_po',
671 'Create a purchase order by default during ACQ file upload',
679 'acq.upload.default.activate_po',
681 'acq.upload.default.activate_po',
682 'Upload Activate PO',
687 'acq.upload.default.activate_po',
688 'Activate the purchase order by default during ACQ file upload',
696 'acq.upload.default.provider',
698 'acq.upload.default.provider',
699 'Upload Default Provider',
704 'acq.upload.default.provider',
705 'Default provider to use during ACQ file upload',
713 'acq.upload.default.vandelay.match_set',
715 'acq.upload.default.vandelay.match_set',
716 'Upload Default Match Set',
721 'acq.upload.default.vandelay.match_set',
722 'Default match set to use during ACQ file upload',
730 'acq.upload.default.vandelay.merge_profile',
732 'acq.upload.default.vandelay.merge_profile',
733 'Upload Default Merge Profile',
738 'acq.upload.default.vandelay.merge_profile',
739 'Default merge profile to use during ACQ file upload',
747 'acq.upload.default.vandelay.import_non_matching',
749 'acq.upload.default.vandelay.import_non_matching',
750 'Upload Import Non Matching by Default',
755 'acq.upload.default.vandelay.import_non_matching',
756 'Import non-matching records by default during ACQ file upload',
764 'acq.upload.default.vandelay.merge_on_exact',
766 'acq.upload.default.vandelay.merge_on_exact',
767 'Upload Merge on Exact Match by Default',
772 'acq.upload.default.vandelay.merge_on_exact',
773 'Merge records on exact match by default during ACQ file upload',
781 'acq.upload.default.vandelay.merge_on_best',
783 'acq.upload.default.vandelay.merge_on_best',
784 'Upload Merge on Best Match by Default',
789 'acq.upload.default.vandelay.merge_on_best',
790 'Merge records on best match by default during ACQ file upload',
798 'acq.upload.default.vandelay.merge_on_single',
800 'acq.upload.default.vandelay.merge_on_single',
801 'Upload Merge on Single Match by Default',
806 'acq.upload.default.vandelay.merge_on_single',
807 'Merge records on single match by default during ACQ file upload',
815 'acq.upload.default.vandelay.quality_ratio',
817 'acq.upload.default.vandelay.quality_ratio',
818 'Upload Default Min. Quality Ratio',
823 'acq.upload.default.vandelay.quality_ratio',
824 'Default minimum quality ratio used during ACQ file upload',
832 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
834 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
835 'Upload Default Insufficient Quality Fall-Thru Profile',
840 'acq.upload.default.vandelay.low_quality_fall_thru_profile',
841 'Default low-quality fall through profile used during ACQ file upload',
849 'acq.upload.default.vandelay.load_item_for_imported',
851 'acq.upload.default.vandelay.load_item_for_imported',
852 'Upload Load Items for Imported Records by Default',
857 'acq.upload.default.vandelay.load_item_for_imported',
858 'Load items for imported records by default during ACQ file upload',
868 SELECT evergreen.upgrade_deps_block_check('0756', :eg_version);
870 -- Drop some lingering old functions in search schema
871 DROP FUNCTION IF EXISTS search.staged_fts(INT,INT,TEXT,INT[],INT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT[],TEXT,TEXT,TEXT,TEXT[],TEXT,REAL,TEXT,BOOL,BOOL,BOOL,INT,INT,INT);
872 DROP FUNCTION IF EXISTS search.parse_search_args(TEXT);
873 DROP FUNCTION IF EXISTS search.explode_array(ANYARRAY);
874 DROP FUNCTION IF EXISTS search.pick_table(TEXT);
876 -- Now drop query_parser_fts and related
-- The types are dropped after the function so nothing still references them.
877 DROP FUNCTION IF EXISTS search.query_parser_fts(INT,INT,TEXT,INT[],INT[],INT,INT,INT,BOOL,BOOL,INT);
878 DROP TYPE IF EXISTS search.search_result;
879 DROP TYPE IF EXISTS search.search_args;
882 SELECT evergreen.upgrade_deps_block_check('0757', :eg_version);
-- Patch 0757: for every snowball stemmer shipped in pg_catalog, build a
-- matching "<lang>_nostop" dictionary and text search configuration that
-- stems ASCII words without stopword removal and passes non-ASCII tokens
-- through pg_catalog.simple. Afterwards the keyword and "default" configs
-- are cloned from english_nostop. NOTE(review): the DO/function wrapper
-- around the loop is not visible in this excerpt.
884 SET search_path = public, pg_catalog;
890 FOR lang IN SELECT substring(pptsd.dictname from '(.*)_stem$') AS lang FROM pg_catalog.pg_ts_dict pptsd JOIN pg_catalog.pg_namespace ppn ON ppn.oid = pptsd.dictnamespace
891 WHERE ppn.nspname = 'pg_catalog' AND pptsd.dictname LIKE '%_stem' LOOP
892 RAISE NOTICE 'FOUND LANGUAGE %', lang;
-- All per-language DDL is assembled into one string and EXECUTEd; the quoted
-- segments below are pieces of that single SQL string.
894 EXECUTE 'DROP TEXT SEARCH DICTIONARY IF EXISTS ' || lang || '_nostop CASCADE;
895 CREATE TEXT SEARCH DICTIONARY ' || lang || '_nostop (TEMPLATE=pg_catalog.snowball, language=''' || lang || ''');
896 COMMENT ON TEXT SEARCH DICTIONARY ' || lang || '_nostop IS ''' ||lang || ' snowball stemmer with no stopwords for ASCII words only.'';
897 CREATE TEXT SEARCH CONFIGURATION ' || lang || '_nostop ( COPY = pg_catalog.' || lang || ' );
898 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR word, hword, hword_part WITH pg_catalog.simple;
899 ALTER TEXT SEARCH CONFIGURATION ' || lang || '_nostop ALTER MAPPING FOR asciiword, asciihword, hword_asciipart WITH ' || lang || '_nostop;';
904 CREATE TEXT SEARCH CONFIGURATION keyword ( COPY = english_nostop );
905 CREATE TEXT SEARCH CONFIGURATION "default" ( COPY = english_nostop );
907 SET search_path = evergreen, public, pg_catalog;
-- Per-class full-text ranking weights; the defaults mirror PostgreSQL's
-- built-in ts_rank weight array {D=0.1, C=0.2, B=0.4, A=1.0}.
909 ALTER TABLE config.metabib_class
910 ADD COLUMN a_weight NUMERIC DEFAULT 1.0 NOT NULL,
911 ADD COLUMN b_weight NUMERIC DEFAULT 0.4 NOT NULL,
912 ADD COLUMN c_weight NUMERIC DEFAULT 0.2 NOT NULL,
913 ADD COLUMN d_weight NUMERIC DEFAULT 0.1 NOT NULL;
-- Catalog of available text search configs, plus two mapping tables that
-- assign configs (with an A-D index weight and optional language filters)
-- to whole metabib classes or to individual metabib fields.
-- NOTE(review): column lines of ts_config_list are not visible in this
-- excerpt; the mapping tables reference ts_config_list (id) with a TEXT
-- column, which implies id itself is TEXT — confirm in the full script.
915 CREATE TABLE config.ts_config_list (
919 COMMENT ON TABLE config.ts_config_list IS $$
922 A list of full text configs with names and descriptions.
925 CREATE TABLE config.metabib_class_ts_map (
926 id SERIAL PRIMARY KEY,
927 field_class TEXT NOT NULL REFERENCES config.metabib_class (name),
928 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
929 active BOOL NOT NULL DEFAULT TRUE,
930 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
931 index_lang TEXT NULL,
932 search_lang TEXT NULL,
933 always BOOL NOT NULL DEFAULT true
935 COMMENT ON TABLE config.metabib_class_ts_map IS $$
936 Text Search Configs for metabib class indexing
938 This table contains text search config definitions for
939 storing index_vector values.
942 CREATE TABLE config.metabib_field_ts_map (
943 id SERIAL PRIMARY KEY,
944 metabib_field INT NOT NULL REFERENCES config.metabib_field (id),
945 ts_config TEXT NOT NULL REFERENCES config.ts_config_list (id),
946 active BOOL NOT NULL DEFAULT TRUE,
947 index_weight CHAR(1) NOT NULL DEFAULT 'C' CHECK (index_weight IN ('A','B','C','D')),
948 index_lang TEXT NULL,
949 search_lang TEXT NULL
951 COMMENT ON TABLE config.metabib_field_ts_map IS $$
952 Text Search Configs for metabib field indexing
954 This table contains text search config definitions for
955 storing index_vector values.
-- Six parallel "combined" tables, one per search class, each holding one
-- merged tsvector per (record, metabib_field) plus a metabib_field-IS-NULL
-- row combining the whole class. Since NULL values would defeat a plain
-- unique constraint, uniqueness is enforced via a functional index on
-- COALESCE(metabib_field::TEXT,''); the GiST index serves tsvector search.
-- Populated by metabib.update_combined_index_vectors() below.
958 CREATE TABLE metabib.combined_identifier_field_entry (
959 record BIGINT NOT NULL,
960 metabib_field INT NULL,
961 index_vector tsvector NOT NULL
963 CREATE UNIQUE INDEX metabib_combined_identifier_field_entry_fakepk_idx ON metabib.combined_identifier_field_entry (record, COALESCE(metabib_field::TEXT,''));
964 CREATE INDEX metabib_combined_identifier_field_entry_index_vector_idx ON metabib.combined_identifier_field_entry USING GIST (index_vector);
965 CREATE INDEX metabib_combined_identifier_field_source_idx ON metabib.combined_identifier_field_entry (metabib_field);
967 CREATE TABLE metabib.combined_title_field_entry (
968 record BIGINT NOT NULL,
969 metabib_field INT NULL,
970 index_vector tsvector NOT NULL
972 CREATE UNIQUE INDEX metabib_combined_title_field_entry_fakepk_idx ON metabib.combined_title_field_entry (record, COALESCE(metabib_field::TEXT,''));
973 CREATE INDEX metabib_combined_title_field_entry_index_vector_idx ON metabib.combined_title_field_entry USING GIST (index_vector);
974 CREATE INDEX metabib_combined_title_field_source_idx ON metabib.combined_title_field_entry (metabib_field);
976 CREATE TABLE metabib.combined_author_field_entry (
977 record BIGINT NOT NULL,
978 metabib_field INT NULL,
979 index_vector tsvector NOT NULL
981 CREATE UNIQUE INDEX metabib_combined_author_field_entry_fakepk_idx ON metabib.combined_author_field_entry (record, COALESCE(metabib_field::TEXT,''));
982 CREATE INDEX metabib_combined_author_field_entry_index_vector_idx ON metabib.combined_author_field_entry USING GIST (index_vector);
983 CREATE INDEX metabib_combined_author_field_source_idx ON metabib.combined_author_field_entry (metabib_field);
985 CREATE TABLE metabib.combined_subject_field_entry (
986 record BIGINT NOT NULL,
987 metabib_field INT NULL,
988 index_vector tsvector NOT NULL
990 CREATE UNIQUE INDEX metabib_combined_subject_field_entry_fakepk_idx ON metabib.combined_subject_field_entry (record, COALESCE(metabib_field::TEXT,''));
991 CREATE INDEX metabib_combined_subject_field_entry_index_vector_idx ON metabib.combined_subject_field_entry USING GIST (index_vector);
992 CREATE INDEX metabib_combined_subject_field_source_idx ON metabib.combined_subject_field_entry (metabib_field);
994 CREATE TABLE metabib.combined_keyword_field_entry (
995 record BIGINT NOT NULL,
996 metabib_field INT NULL,
997 index_vector tsvector NOT NULL
999 CREATE UNIQUE INDEX metabib_combined_keyword_field_entry_fakepk_idx ON metabib.combined_keyword_field_entry (record, COALESCE(metabib_field::TEXT,''));
1000 CREATE INDEX metabib_combined_keyword_field_entry_index_vector_idx ON metabib.combined_keyword_field_entry USING GIST (index_vector);
1001 CREATE INDEX metabib_combined_keyword_field_source_idx ON metabib.combined_keyword_field_entry (metabib_field);
1003 CREATE TABLE metabib.combined_series_field_entry (
1004 record BIGINT NOT NULL,
1005 metabib_field INT NULL,
1006 index_vector tsvector NOT NULL
1008 CREATE UNIQUE INDEX metabib_combined_series_field_entry_fakepk_idx ON metabib.combined_series_field_entry (record, COALESCE(metabib_field::TEXT,''));
1009 CREATE INDEX metabib_combined_series_field_entry_index_vector_idx ON metabib.combined_series_field_entry USING GIST (index_vector);
1010 CREATE INDEX metabib_combined_series_field_source_idx ON metabib.combined_series_field_entry (metabib_field);
-- Rebuild the per-class "combined" tsvector rows for one bib record.
-- For each search class (keyword, title, author, subject, series,
-- identifier) the record's old combined rows are deleted, then two kinds
-- of rows are re-inserted:
--   * one row per metabib field (GROUP BY field), and
--   * one catch-all row with metabib_field = NULL aggregating every
--     field's vector for the class.
-- string_agg of the vectors' text form, cast back to tsvector and
-- strip()ed, merges the per-field vectors; COALESCE guards the
-- zero-row case so the cast never sees NULL.
1012 CREATE OR REPLACE FUNCTION metabib.update_combined_index_vectors(bib_id BIGINT) RETURNS VOID AS $func$
-- keyword class
1014 DELETE FROM metabib.combined_keyword_field_entry WHERE record = bib_id;
1015 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
1016 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1017 FROM metabib.keyword_field_entry WHERE source = bib_id GROUP BY field;
1018 INSERT INTO metabib.combined_keyword_field_entry(record, metabib_field, index_vector)
1019 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1020 FROM metabib.keyword_field_entry WHERE source = bib_id;
-- title class
1022 DELETE FROM metabib.combined_title_field_entry WHERE record = bib_id;
1023 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1024 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1025 FROM metabib.title_field_entry WHERE source = bib_id GROUP BY field;
1026 INSERT INTO metabib.combined_title_field_entry(record, metabib_field, index_vector)
1027 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1028 FROM metabib.title_field_entry WHERE source = bib_id;
-- author class
1030 DELETE FROM metabib.combined_author_field_entry WHERE record = bib_id;
1031 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1032 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1033 FROM metabib.author_field_entry WHERE source = bib_id GROUP BY field;
1034 INSERT INTO metabib.combined_author_field_entry(record, metabib_field, index_vector)
1035 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1036 FROM metabib.author_field_entry WHERE source = bib_id;
-- subject class
1038 DELETE FROM metabib.combined_subject_field_entry WHERE record = bib_id;
1039 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1040 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1041 FROM metabib.subject_field_entry WHERE source = bib_id GROUP BY field;
1042 INSERT INTO metabib.combined_subject_field_entry(record, metabib_field, index_vector)
1043 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1044 FROM metabib.subject_field_entry WHERE source = bib_id;
-- series class
1046 DELETE FROM metabib.combined_series_field_entry WHERE record = bib_id;
1047 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1048 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1049 FROM metabib.series_field_entry WHERE source = bib_id GROUP BY field;
1050 INSERT INTO metabib.combined_series_field_entry(record, metabib_field, index_vector)
1051 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1052 FROM metabib.series_field_entry WHERE source = bib_id;
-- identifier class
1054 DELETE FROM metabib.combined_identifier_field_entry WHERE record = bib_id;
1055 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1056 SELECT bib_id, field, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1057 FROM metabib.identifier_field_entry WHERE source = bib_id GROUP BY field;
1058 INSERT INTO metabib.combined_identifier_field_entry(record, metabib_field, index_vector)
1059 SELECT bib_id, NULL, strip(COALESCE(string_agg(index_vector::TEXT,' '),'')::tsvector)
1060 FROM metabib.identifier_field_entry WHERE source = bib_id;
1063 $func$ LANGUAGE PLPGSQL;
-- Re-ingest every metabib field entry (facet, browse, search) for one
-- bib record.  The skip_* flags let callers suppress individual index
-- types.  When search rows are rebuilt, the combined per-class vectors
-- are refreshed at the end via metabib.update_combined_index_vectors().
1065 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1068 ind_data metabib.field_entry_template%ROWTYPE;
1069 mbe_row metabib.browse_entry%ROWTYPE;
-- honor the global "inserts only" ingest optimization flag
1072 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
-- clear out the old per-class search rows before re-extracting
1074 IF NOT skip_search THEN
1075 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1076 -- RAISE NOTICE 'Emptying out %', fclass.name;
1077 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1080 IF NOT skip_facet THEN
1081 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1083 IF NOT skip_browse THEN
1084 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
-- walk every extracted field entry and route it to facet/browse/search
1088 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
1089 IF ind_data.field < 0 THEN
-- normalize negative field ids to their positive counterpart
1090 ind_data.field = -1 * ind_data.field;
1093 IF ind_data.facet_field AND NOT skip_facet THEN
1094 INSERT INTO metabib.facet_entry (field, source, value)
1095 VALUES (ind_data.field, ind_data.source, ind_data.value);
1098 IF ind_data.browse_field AND NOT skip_browse THEN
1099 -- A caveat about this SELECT: this should take care of replacing
1100 -- old mbe rows when data changes, but not if normalization (by
1101 -- which I mean specifically the output of
1102 -- evergreen.oils_tsearch2()) changes. It may or may not be
1103 -- expensive to add a comparison of index_vector to index_vector
1104 -- to the WHERE clause below.
1105 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
-- reuse an existing browse entry when the value already exists
1107 mbe_id := mbe_row.id;
1109 INSERT INTO metabib.browse_entry (value) VALUES
1110 (metabib.browse_normalize(ind_data.value, ind_data.field));
1111 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1114 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1115 VALUES (mbe_id, ind_data.field, ind_data.source);
1118 IF ind_data.search_field AND NOT skip_search THEN
1120 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1122 quote_literal(ind_data.field) || $$, $$ ||
1123 quote_literal(ind_data.source) || $$, $$ ||
1124 quote_literal(ind_data.value) ||
-- finally, refresh the combined class vectors for the record
1130 IF NOT skip_search THEN
1131 PERFORM metabib.update_combined_index_vectors(bib_id);
1136 $func$ LANGUAGE PLPGSQL;
-- Remove any previous incarnations of the FTI trigger function before
-- redefining it below; CASCADE also drops the triggers that depend on
-- it (they are recreated later in this script).
1138 DROP FUNCTION IF EXISTS evergreen.oils_tsearch2() CASCADE;
1139 DROP FUNCTION IF EXISTS public.oils_tsearch2() CASCADE;
-- Trigger function that (re)builds NEW.index_vector for the FTI tables,
-- replacing the old TSearch2-extension trigger with native full-text
-- search.  Behavior by target table:
--   * *_field_entry tables: run the configured index normalizers over
--     the value, then build a weighted vector from the ts configs in
--     config.metabib_class_ts_map / config.metabib_field_ts_map.
--   * browse_entry: collapse non-word characters, search_normalize, and
--     build a single vector with the ts config named in TG_ARGV[0].
--   * everything else: plain to_tsvector with TG_ARGV[0].
1141 CREATE OR REPLACE FUNCTION public.oils_tsearch2 () RETURNS TRIGGER AS $$
1145 temp_vector TEXT := '';
-- start from an empty vector each time the row is (re)ingested
1150 NEW.index_vector = ''::tsvector;
1152 IF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
-- apply each normalizer mapped to this metabib field, in order
1154 SELECT n.func AS func,
1155 n.param_count AS param_count,
1157 FROM config.index_normalizer n
1158 JOIN config.metabib_field_index_norm_map m ON (m.norm = n.id)
1159 WHERE field = NEW.field
-- dynamically invoke normalizer(value [, params...])
1161 EXECUTE 'SELECT ' || normalizer.func || '(' ||
1162 quote_literal( value ) ||
1164 WHEN normalizer.param_count > 0
1165 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
1174 IF TG_TABLE_NAME::TEXT ~ 'browse_entry$' THEN
1175 value := ARRAY_TO_STRING(
1176 evergreen.regexp_split_to_array(value, E'\\W+'), ' '
1178 value := public.search_normalize(value);
1179 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1180 ELSIF TG_TABLE_NAME::TEXT ~ 'field_entry$' THEN
-- class-level ts configs, then field-level overrides, merged by weight
1182 SELECT ts_config, index_weight
1183 FROM config.metabib_class_ts_map
1184 WHERE field_class = TG_ARGV[0]
-- NOTE(review): AND binds tighter than OR, so this parses as
-- (field_class = ... AND index_lang IS NULL) OR EXISTS(...) -- confirm
-- the intended grouping; parentheses may be missing.
1185 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
-- NOTE(review): same precedence concern applies to this AND/OR mix.
1186 AND always OR NOT EXISTS (SELECT 1 FROM config.metabib_field_ts_map WHERE metabib_field = NEW.field)
1188 SELECT ts_config, index_weight
1189 FROM config.metabib_field_ts_map
1190 WHERE metabib_field = NEW.field
-- NOTE(review): un-parenthesized AND/OR here as well -- confirm.
1191 AND index_lang IS NULL OR EXISTS (SELECT 1 FROM metabib.record_attr WHERE id = NEW.source AND index_lang IN(attrs->'item_lang',attrs->'language'))
1192 ORDER BY index_weight ASC
-- flush the accumulated text into the vector when the weight changes
1194 IF cur_weight IS NOT NULL AND cur_weight != ts_rec.index_weight THEN
1195 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
1198 cur_weight = ts_rec.index_weight;
1199 SELECT INTO temp_vector temp_vector || ' ' || to_tsvector(ts_rec.ts_config::regconfig, value)::TEXT;
-- flush the final weight bucket
1201 NEW.index_vector = NEW.index_vector || setweight(temp_vector::tsvector,cur_weight);
1203 NEW.index_vector = to_tsvector(TG_ARGV[0]::regconfig, value);
1208 $$ LANGUAGE PLPGSQL;
-- Recreate the FTI triggers (dropped by the CASCADE above) against the
-- new native oils_tsearch2().  The single argument names the default ts
-- config / field class for the table.
1210 CREATE TRIGGER authority_full_rec_fti_trigger
1211 BEFORE UPDATE OR INSERT ON authority.full_rec
1212 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1214 CREATE TRIGGER authority_simple_heading_fti_trigger
1215 BEFORE UPDATE OR INSERT ON authority.simple_heading
1216 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1218 CREATE TRIGGER metabib_identifier_field_entry_fti_trigger
1219 BEFORE UPDATE OR INSERT ON metabib.identifier_field_entry
1220 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('identifier');
1222 CREATE TRIGGER metabib_title_field_entry_fti_trigger
1223 BEFORE UPDATE OR INSERT ON metabib.title_field_entry
1224 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('title');
1226 CREATE TRIGGER metabib_author_field_entry_fti_trigger
1227 BEFORE UPDATE OR INSERT ON metabib.author_field_entry
1228 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('author');
1230 CREATE TRIGGER metabib_subject_field_entry_fti_trigger
1231 BEFORE UPDATE OR INSERT ON metabib.subject_field_entry
1232 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('subject');
1234 CREATE TRIGGER metabib_keyword_field_entry_fti_trigger
1235 BEFORE UPDATE OR INSERT ON metabib.keyword_field_entry
1236 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1238 CREATE TRIGGER metabib_series_field_entry_fti_trigger
1239 BEFORE UPDATE OR INSERT ON metabib.series_field_entry
1240 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('series');
1242 CREATE TRIGGER metabib_browse_entry_fti_trigger
1243 BEFORE INSERT OR UPDATE ON metabib.browse_entry
1244 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('keyword');
1246 CREATE TRIGGER metabib_full_rec_fti_trigger
1247 BEFORE UPDATE OR INSERT ON metabib.real_full_rec
1248 FOR EACH ROW EXECUTE PROCEDURE oils_tsearch2('default');
-- Seed the list of available PostgreSQL text-search configurations;
-- 'simple' is the non-stemmed baseline, the *_nostop entries are
-- per-language stemmed configs.
1250 INSERT INTO config.ts_config_list(id, name) VALUES
1251 ('simple','Non-Stemmed Simple'),
1252 ('danish_nostop','Danish Stemmed'),
1253 ('dutch_nostop','Dutch Stemmed'),
1254 ('english_nostop','English Stemmed'),
1255 ('finnish_nostop','Finnish Stemmed'),
1256 ('french_nostop','French Stemmed'),
1257 ('german_nostop','German Stemmed'),
1258 ('hungarian_nostop','Hungarian Stemmed'),
1259 ('italian_nostop','Italian Stemmed'),
1260 ('norwegian_nostop','Norwegian Stemmed'),
1261 ('portuguese_nostop','Portuguese Stemmed'),
1262 ('romanian_nostop','Romanian Stemmed'),
1263 ('russian_nostop','Russian Stemmed'),
1264 ('spanish_nostop','Spanish Stemmed'),
1265 ('swedish_nostop','Swedish Stemmed'),
1266 ('turkish_nostop','Turkish Stemmed');
-- Default class-to-ts-config mapping consumed by oils_tsearch2():
-- each class gets an exact-form 'simple' vector at weight A plus (except
-- identifier) an English-stemmed vector at weight C; always = true means
-- the mapping applies even when field-level overrides exist.
1268 INSERT INTO config.metabib_class_ts_map(field_class, ts_config, index_weight, always) VALUES
1269 ('keyword','simple','A',true),
1270 ('keyword','english_nostop','C',true),
1271 ('title','simple','A',true),
1272 ('title','english_nostop','C',true),
1273 ('author','simple','A',true),
1274 ('author','english_nostop','C',true),
1275 ('series','simple','A',true),
1276 ('series','english_nostop','C',true),
1277 ('subject','simple','A',true),
1278 ('subject','english_nostop','C',true),
1279 ('identifier','simple','A',true);
-- Relevance "bump" multiplier (plperlu): for each bump name in bumps[],
-- multiply the running result by the matching mults[] factor when value
-- matches the search terms per that bump's rule (first_word, full_match,
-- or word_order).
1281 CREATE OR REPLACE FUNCTION evergreen.rel_bump(terms TEXT[], value TEXT, bumps TEXT[], mults NUMERIC[]) RETURNS NUMERIC AS
1284 my ($terms,$value,$bumps,$mults) = @_;
1288 for (my $id = 0; $id < @$bumps; $id++) {
# value begins with the first search term
1289 if ($bumps->[$id] eq 'first_word') {
1290 $retval *= $mults->[$id] if ($value =~ /^$terms->[0]/);
# value is exactly the space-joined search terms
1291 } elsif ($bumps->[$id] eq 'full_match') {
1292 my $fullmatch = join(' ', @$terms);
1293 $retval *= $mults->[$id] if ($value =~ /^$fullmatch$/);
# search terms appear in order (any gaps) within value
1294 } elsif ($bumps->[$id] eq 'word_order') {
1295 my $wordorder = join('.*', @$terms);
1296 $retval *= $mults->[$id] if ($value =~ /$wordorder/);
1300 $BODY$ LANGUAGE plperlu IMMUTABLE STRICT COST 100;
1302 /* ** This happens in the supplemental script **
1304 UPDATE metabib.identifier_field_entry set value = value;
1305 UPDATE metabib.title_field_entry set value = value;
1306 UPDATE metabib.author_field_entry set value = value;
1307 UPDATE metabib.subject_field_entry set value = value;
1308 UPDATE metabib.keyword_field_entry set value = value;
1309 UPDATE metabib.series_field_entry set value = value;
1311 SELECT metabib.update_combined_index_vectors(id)
1312 FROM biblio.record_entry
-- 0758: add the Vandelay settings group and a link-type org setting for
-- choosing a default record match set.
1317 SELECT evergreen.upgrade_deps_block_check('0758', :eg_version);
1319 INSERT INTO config.settings_group (name, label) VALUES
1320 ('vandelay', 'Vandelay');
1322 INSERT INTO config.org_unit_setting_type (name, grp, label, datatype, fm_class) VALUES
1323 ('vandelay.default_match_set', 'vandelay', 'Default Record Match Set', 'link', 'vms');
-- 0759: proximity-adjustment table used by best-hold selection.  Each
-- row adjusts (or, with absolute_adjustment, replaces) the computed
-- copy<->pickup-lib proximity when the listed criteria match.
1326 SELECT evergreen.upgrade_deps_block_check('0759', :eg_version);
1328 CREATE TABLE actor.org_unit_proximity_adjustment (
1329 id SERIAL PRIMARY KEY,
1330 item_circ_lib INT REFERENCES actor.org_unit (id),
1331 item_owning_lib INT REFERENCES actor.org_unit (id),
1332 copy_location INT REFERENCES asset.copy_location (id),
1333 hold_pickup_lib INT REFERENCES actor.org_unit (id),
1334 hold_request_lib INT REFERENCES actor.org_unit (id),
1335 pos INT NOT NULL DEFAULT 0,
1336 absolute_adjustment BOOL NOT NULL DEFAULT FALSE,
1337 prox_adjustment NUMERIC,
-- FK added later in this script as a DEFERRABLE constraint
1338 circ_mod TEXT, -- REFERENCES config.circ_modifier (code),
-- at least one matching criterion must be supplied
1339 CONSTRAINT prox_adj_criterium CHECK (COALESCE(item_circ_lib::TEXT,item_owning_lib::TEXT,copy_location::TEXT,hold_pickup_lib::TEXT,hold_request_lib::TEXT,circ_mod) IS NOT NULL)
-- one adjustment row per unique criteria combination
1341 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (item_circ_lib,item_owning_lib,copy_location,hold_pickup_lib,hold_request_lib,circ_mod);
1342 CREATE INDEX prox_adj_circ_lib_idx ON actor.org_unit_proximity_adjustment (item_circ_lib);
1343 CREATE INDEX prox_adj_owning_lib_idx ON actor.org_unit_proximity_adjustment (item_owning_lib);
1344 CREATE INDEX prox_adj_copy_location_idx ON actor.org_unit_proximity_adjustment (copy_location);
1345 CREATE INDEX prox_adj_pickup_lib_idx ON actor.org_unit_proximity_adjustment (hold_pickup_lib);
1346 CREATE INDEX prox_adj_request_lib_idx ON actor.org_unit_proximity_adjustment (hold_request_lib);
1347 CREATE INDEX prox_adj_circ_mod_idx ON actor.org_unit_proximity_adjustment (circ_mod);
-- Return each ancestor org unit of $1 along with its distance (number of
-- parent hops) from $1, via a recursive walk up parent_ou.
1349 CREATE OR REPLACE FUNCTION actor.org_unit_ancestors_distance( INT ) RETURNS TABLE (id INT, distance INT) AS $$
1350 WITH RECURSIVE org_unit_ancestors_distance(id, distance) AS (
-- recursive step: climb to the parent, one hop further away
1353 SELECT ou.parent_ou, ouad.distance+1
1354 FROM actor.org_unit ou JOIN org_unit_ancestors_distance ouad ON (ou.id = ouad.id)
1355 WHERE ou.parent_ou IS NOT NULL
1357 SELECT * FROM org_unit_ancestors_distance;
-- ROWS 1: hint to the planner that org trees are shallow
1358 $$ LANGUAGE SQL STABLE ROWS 1;
-- Compute the effective proximity between a copy and a hold's pickup
-- lib: start from the raw actor.org_unit_proximity value, let the
-- best-matching absolute adjustment replace it, then add any matching
-- relative adjustments.
1360 CREATE OR REPLACE FUNCTION action.hold_copy_calculated_proximity(
-- optional override for "where the copy is" (defaults to its circ lib)
1363 copy_context_ou INT DEFAULT NULL
1364 -- TODO maybe? hold_context_ou INT DEFAULT NULL. This would optionally
1365 -- support an "ahprox" measurement: adjust prox between copy circ lib and
1366 -- hold request lib, but I'm unsure whether to use this theoretical
1367 -- argument only in the baseline calculation or later in the other
1368 -- queries in this function.
1369 ) RETURNS NUMERIC AS $f$
1371 aoupa actor.org_unit_proximity_adjustment%ROWTYPE;
1372 ahr action.hold_request%ROWTYPE;
1373 acp asset.copy%ROWTYPE;
1374 acn asset.call_number%ROWTYPE;
1375 acl asset.copy_location%ROWTYPE;
1376 baseline_prox NUMERIC;
-- load the hold, copy, call number, and copy location under evaluation
1386 SELECT * INTO ahr FROM action.hold_request WHERE id = ahr_id;
1387 SELECT * INTO acp FROM asset.copy WHERE id = acp_id;
1388 SELECT * INTO acn FROM asset.call_number WHERE id = acp.call_number;
1389 SELECT * INTO acl FROM asset.copy_location WHERE id = acp.location;
1391 IF copy_context_ou IS NULL THEN
1392 copy_context_ou := acp.circ_lib;
1395 -- First, gather the baseline proximity of "here" to pickup lib
1396 SELECT prox INTO baseline_prox FROM actor.org_unit_proximity WHERE from_org = copy_context_ou AND to_org = ahr.pickup_lib;
1398 -- Find any absolute adjustments, and set the baseline prox to that
1399 SELECT adj.* INTO aoupa
1400 FROM actor.org_unit_proximity_adjustment adj
1401 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1402 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
-- NOTE(review): this joins acl_ol via acn_ol.id and compares an org-unit
-- ancestor id against adj.copy_location (a copy-location FK) -- looks
-- like a copy-paste slip; confirm against upstream.
1403 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1404 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1405 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1406 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1407 absolute_adjustment AND
-- at least one criterion must have actually matched
1408 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
-- prefer the most specific match: unmatched criteria cost 999 each
1410 COALESCE(acp_cl.distance,999)
1411 + COALESCE(acn_ol.distance,999)
1412 + COALESCE(acl_ol.distance,999)
1413 + COALESCE(ahr_pl.distance,999)
1414 + COALESCE(ahr_rl.distance,999),
1419 baseline_prox := aoupa.prox_adjustment;
1422 -- Now find any relative adjustments, and change the baseline prox based on them
1425 FROM actor.org_unit_proximity_adjustment adj
1426 LEFT JOIN actor.org_unit_ancestors_distance(copy_context_ou) acp_cl ON (acp_cl.id = adj.item_circ_lib)
1427 LEFT JOIN actor.org_unit_ancestors_distance(acn.owning_lib) acn_ol ON (acn_ol.id = adj.item_owning_lib)
-- NOTE(review): same suspicious acn_ol.id / adj.copy_location comparison
-- as in the absolute-adjustment query above -- confirm.
1428 LEFT JOIN actor.org_unit_ancestors_distance(acl.owning_lib) acl_ol ON (acn_ol.id = adj.copy_location)
1429 LEFT JOIN actor.org_unit_ancestors_distance(ahr.pickup_lib) ahr_pl ON (ahr_pl.id = adj.hold_pickup_lib)
1430 LEFT JOIN actor.org_unit_ancestors_distance(ahr.request_lib) ahr_rl ON (ahr_rl.id = adj.hold_request_lib)
1431 WHERE (adj.circ_mod IS NULL OR adj.circ_mod = acp.circ_modifier) AND
1432 NOT absolute_adjustment AND
1433 COALESCE(acp_cl.id, acn_ol.id, acl_ol.id, ahr_pl.id, ahr_rl.id) IS NOT NULL
-- relative adjustments accumulate onto the baseline
1435 baseline_prox := baseline_prox + aoupa.prox_adjustment;
1438 RETURN baseline_prox;
1440 $f$ LANGUAGE PLPGSQL;
-- Attach the circ_mod FK that was deferred at table-creation time
-- (deferrable so bulk loads can insert in any order).
1442 ALTER TABLE actor.org_unit_proximity_adjustment
1443 ADD CONSTRAINT actor_org_unit_proximity_adjustment_circ_mod_fkey
1444 FOREIGN KEY (circ_mod) REFERENCES config.circ_modifier (code)
1445 DEFERRABLE INITIALLY DEFERRED;
-- Cache the calculated proximity on the hold/copy map rows.
1447 ALTER TABLE action.hold_copy_map ADD COLUMN proximity NUMERIC;
-- 0760: configurable best-hold selection sort orders.  Each INT column
-- holds that criterion's position (1 = most significant) in the sort;
-- NULL means the criterion is unused by the named ordering.
1450 SELECT evergreen.upgrade_deps_block_check('0760', :eg_version);
1452 CREATE TABLE config.best_hold_order(
1453 id SERIAL PRIMARY KEY, -- (metadata)
1454 name TEXT UNIQUE, -- i18n (metadata)
1455 pprox INT, -- copy capture <-> pickup lib prox
1456 hprox INT, -- copy circ lib <-> request lib prox
1457 aprox INT, -- copy circ lib <-> pickup lib ADJUSTED prox on ahcm
1458 approx INT, -- copy capture <-> pickup lib ADJUSTED prox from function
1459 priority INT, -- group hold priority
1460 cut INT, -- cut-in-line
1461 depth INT, -- selection depth
1462 htime INT, -- time since last home-lib circ exceeds org-unit setting
1463 rtime INT, -- request time
1464 shtime INT -- time since copy last trip home exceeds org-unit setting
1467 -- At least one of these columns must contain a non-null value
1468 ALTER TABLE config.best_hold_order ADD CHECK ((
1469 pprox IS NOT NULL OR
1470 hprox IS NOT NULL OR
1471 aprox IS NOT NULL OR
1472 priority IS NOT NULL OR
1474 depth IS NOT NULL OR
1475 htime IS NOT NULL OR
-- Stock best-hold orderings.  In each row the listed columns get
-- positions 1..8, i.e. the column order in the INSERT is the sort order.
-- "Traditional" ranks proximity first; the go-home variants promote
-- hprox/htime; "FIFO" ranks priority/cut/request-time first.
1479 INSERT INTO config.best_hold_order (
1481 pprox, aprox, priority, cut, depth, rtime, htime, hprox
1484 1, 2, 3, 4, 5, 6, 7, 8
1487 INSERT INTO config.best_hold_order (
1489 hprox, pprox, aprox, priority, cut, depth, rtime, htime
1491 'Traditional with Holds-always-go-home',
1492 1, 2, 3, 4, 5, 6, 7, 8
1495 INSERT INTO config.best_hold_order (
1497 htime, hprox, pprox, aprox, priority, cut, depth, rtime
1499 'Traditional with Holds-go-home',
1500 1, 2, 3, 4, 5, 6, 7, 8
1503 INSERT INTO config.best_hold_order (
1505 priority, cut, rtime, depth, pprox, hprox, aprox, htime
1508 1, 2, 3, 4, 5, 6, 7, 8
1511 INSERT INTO config.best_hold_order (
1513 hprox, priority, cut, rtime, depth, pprox, aprox, htime
1515 'FIFO with Holds-always-go-home',
1516 1, 2, 3, 4, 5, 6, 7, 8
1519 INSERT INTO config.best_hold_order (
1521 htime, priority, cut, rtime, depth, pprox, aprox, hprox
1523 'FIFO with Holds-go-home',
1524 1, 2, 3, 4, 5, 6, 7, 8
-- Permission gating edits to the best-hold sort order, plus the two org
-- settings that drive it; finally migrate legacy circ.holds_fifo=true
-- org settings onto the new circ.hold_capture_order setting.
1527 INSERT INTO permission.perm_list (
1528 id, code, description
1531 'ADMIN_HOLD_CAPTURE_SORT',
1534 'Allows a user to make changes to best-hold selection sort order',
-- link-type setting pointing at a config.best_hold_order row
1540 INSERT INTO config.org_unit_setting_type (
1541 name, label, description, datatype, fm_class, update_perm, grp
1543 'circ.hold_capture_order',
1545 'circ.hold_capture_order',
1546 'Best-hold selection sort order',
1551 'circ.hold_capture_order',
1552 'Defines the sort order of holds when selecting a hold to fill using a given copy at capture time',
-- interval-type setting used by the go-home orderings
1562 INSERT INTO config.org_unit_setting_type (
1563 name, label, description, datatype, update_perm, grp
1565 'circ.hold_go_home_interval',
1567 'circ.hold_go_home_interval',
1568 'Max foreign-circulation time',
1573 'circ.hold_go_home_interval',
1574 'Time a copy can spend circulating away from its circ lib before returning there to fill a hold (if one exists there)',
-- seed a default go-home interval at the consortium (root) org unit
1583 INSERT INTO actor.org_unit_setting (
1584 org_unit, name, value
1586 (SELECT id FROM actor.org_unit WHERE parent_ou IS NULL),
1587 'circ.hold_go_home_interval',
-- migrate any org that had holds_fifo enabled to the FIFO sort order
1591 UPDATE actor.org_unit_setting SET
1592 name = 'circ.hold_capture_order',
1593 value = (SELECT id FROM config.best_hold_order WHERE name = 'FIFO')
1595 name = 'circ.holds_fifo' AND value ILIKE '%true%';
-- 0762: add global skip flags for each ingest phase and redefine the
-- reingest function so those flags can force a skip even when the
-- caller passes FALSE.
1598 SELECT evergreen.upgrade_deps_block_check('0762', :eg_version);
1600 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_browse_indexing');
1601 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_search_indexing');
1602 INSERT INTO config.internal_flag (name) VALUES ('ingest.skip_facet_indexing');
-- Same structure as the earlier definition, but each skip_* parameter is
-- merged with its internal flag: NULLIF(skip, FALSE) is NULL when the
-- caller passed FALSE, letting COALESCE fall back to the flag's enabled
-- state; an explicit TRUE always skips.
1604 CREATE OR REPLACE FUNCTION metabib.reingest_metabib_field_entries( bib_id BIGINT, skip_facet BOOL DEFAULT FALSE, skip_browse BOOL DEFAULT FALSE, skip_search BOOL DEFAULT FALSE ) RETURNS VOID AS $func$
1607 ind_data metabib.field_entry_template%ROWTYPE;
1608 mbe_row metabib.browse_entry%ROWTYPE;
1615 SELECT COALESCE(NULLIF(skip_facet, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_facet_indexing' AND enabled)) INTO b_skip_facet;
1616 SELECT COALESCE(NULLIF(skip_browse, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_browse_indexing' AND enabled)) INTO b_skip_browse;
1617 SELECT COALESCE(NULLIF(skip_search, FALSE), EXISTS (SELECT enabled FROM config.internal_flag WHERE name = 'ingest.skip_search_indexing' AND enabled)) INTO b_skip_search;
1619 PERFORM * FROM config.internal_flag WHERE name = 'ingest.assume_inserts_only' AND enabled;
-- clear old per-class search rows before re-extracting
1621 IF NOT b_skip_search THEN
1622 FOR fclass IN SELECT * FROM config.metabib_class LOOP
1623 -- RAISE NOTICE 'Emptying out %', fclass.name;
1624 EXECUTE $$DELETE FROM metabib.$$ || fclass.name || $$_field_entry WHERE source = $$ || bib_id;
1627 IF NOT b_skip_facet THEN
1628 DELETE FROM metabib.facet_entry WHERE source = bib_id;
1630 IF NOT b_skip_browse THEN
1631 DELETE FROM metabib.browse_entry_def_map WHERE source = bib_id;
-- route each extracted field entry to facet/browse/search indexes
1635 FOR ind_data IN SELECT * FROM biblio.extract_metabib_field_entry( bib_id ) LOOP
1636 IF ind_data.field < 0 THEN
-- normalize negative field ids to their positive counterpart
1637 ind_data.field = -1 * ind_data.field;
1640 IF ind_data.facet_field AND NOT b_skip_facet THEN
1641 INSERT INTO metabib.facet_entry (field, source, value)
1642 VALUES (ind_data.field, ind_data.source, ind_data.value);
1645 IF ind_data.browse_field AND NOT b_skip_browse THEN
1646 -- A caveat about this SELECT: this should take care of replacing
1647 -- old mbe rows when data changes, but not if normalization (by
1648 -- which I mean specifically the output of
1649 -- evergreen.oils_tsearch2()) changes. It may or may not be
1650 -- expensive to add a comparison of index_vector to index_vector
1651 -- to the WHERE clause below.
1652 SELECT INTO mbe_row * FROM metabib.browse_entry WHERE value = ind_data.value;
-- reuse an existing browse entry when the value already exists
1654 mbe_id := mbe_row.id;
1656 INSERT INTO metabib.browse_entry (value) VALUES
1657 (metabib.browse_normalize(ind_data.value, ind_data.field));
1658 mbe_id := CURRVAL('metabib.browse_entry_id_seq'::REGCLASS);
1661 INSERT INTO metabib.browse_entry_def_map (entry, def, source)
1662 VALUES (mbe_id, ind_data.field, ind_data.source);
1665 IF ind_data.search_field AND NOT b_skip_search THEN
1667 INSERT INTO metabib.$$ || ind_data.field_class || $$_field_entry (field, source, value)
1669 quote_literal(ind_data.field) || $$, $$ ||
1670 quote_literal(ind_data.source) || $$, $$ ||
1671 quote_literal(ind_data.value) ||
-- finally, refresh the combined class vectors for the record
1677 IF NOT b_skip_search THEN
1678 PERFORM metabib.update_combined_index_vectors(bib_id);
1683 $func$ LANGUAGE PLPGSQL;
-- 0763: org setting to truncate accruing fines at the max fine amount.
1686 SELECT evergreen.upgrade_deps_block_check('0763', :eg_version);
1688 INSERT INTO config.org_unit_setting_type (
1689 name, label, grp, datatype
1691 'circ.fines.truncate_to_max_fine',
1692 'Truncate fines to max fine amount',
-- 0765: per-provider default copy count for acquisitions.
1699 SELECT evergreen.upgrade_deps_block_check('0765', :eg_version);
1701 ALTER TABLE acq.provider
1702 ADD COLUMN default_copy_count INTEGER NOT NULL DEFAULT 0;
-- 0768: rank an org unit for holdings sorting.  Lower is better:
-- exact search-lib match (-20000) beats preferred-lib match (-10000),
-- which beats descendants of pref/search libs (distance-based); COALESCE
-- takes the first non-NULL case in order.
1705 SELECT evergreen.upgrade_deps_block_check('0768', :eg_version);
1707 CREATE OR REPLACE FUNCTION evergreen.rank_ou(lib INT, search_lib INT, pref_lib INT DEFAULT NULL)
1708 RETURNS INTEGER AS $$
1711 -- lib matches search_lib
1712 (SELECT CASE WHEN $1 = $2 THEN -20000 END),
1714 -- lib matches pref_lib
1715 (SELECT CASE WHEN $1 = $3 THEN -10000 END),
1718 -- pref_lib is a child of search_lib and lib is a child of pref lib.
1719 (SELECT distance - 5000
1720 FROM actor.org_unit_descendants_distance($3)
1721 WHERE id = $1 AND $3 IN (
1722 SELECT id FROM actor.org_unit_descendants($2))),
1724 -- lib is a child of search_lib
1725 (SELECT distance FROM actor.org_unit_descendants_distance($2) WHERE id = $1),
1727 -- all others pay cash
1730 $$ LANGUAGE SQL STABLE;
-- 0769: redefine evergreen.ranked_volumes with a new `includes` param
-- (old 6-arg signature dropped first, since CREATE OR REPLACE cannot
-- change a signature).  Returns the record's call numbers ranked by org
-- unit (evergreen.rank_ou) then copy status, deduped with MIN(rank).
1735 SELECT evergreen.upgrade_deps_block_check('0769', :eg_version);
1737 DROP FUNCTION IF EXISTS
1738 evergreen.ranked_volumes(BIGINT, INT, INT, HSTORE, HSTORE, INT);
1740 CREATE OR REPLACE FUNCTION evergreen.ranked_volumes(
1743 depth INT DEFAULT NULL,
1744 slimit HSTORE DEFAULT NULL,
1745 soffset HSTORE DEFAULT NULL,
1746 pref_lib INT DEFAULT NULL,
-- e.g. 'exclude_invisible_acn' to hide OPAC-invisible call numbers
1747 includes TEXT[] DEFAULT NULL::TEXT[]
1748 ) RETURNS TABLE (id BIGINT, name TEXT, label_sortkey TEXT, rank BIGINT) AS $$
1749 SELECT ua.id, ua.name, ua.label_sortkey, MIN(ua.rank) AS rank FROM (
1750 SELECT acn.id, aou.name, acn.label_sortkey,
1751 evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status),
1753 FROM asset.call_number acn
1754 JOIN asset.copy acp ON (acn.id = acp.call_number)
1755 JOIN actor.org_unit_descendants( $2, COALESCE(
1758 FROM actor.org_unit_type aout
1759 INNER JOIN actor.org_unit ou ON ou_type = aout.id
1762 ) AS aou ON (acp.circ_lib = aou.id)
1763 WHERE acn.record = $1
1764 AND acn.deleted IS FALSE
1765 AND acp.deleted IS FALSE
1766 AND CASE WHEN ('exclude_invisible_acn' = ANY($7)) THEN
1769 FROM asset.opac_visible_copies
1770 WHERE copy_id = acp.id AND record = acn.record
1772 GROUP BY acn.id, acp.status, aou.name, acn.label_sortkey, aou.id
1774 ORDER BY evergreen.rank_ou(aou.id, $2, $6), evergreen.rank_cp_status(acp.status)
1777 GROUP BY ua.id, ua.name, ua.label_sortkey
1778 ORDER BY rank, ua.name, ua.label_sortkey
-- paging driven by the hstore slimit/soffset 'acn' keys
1779 LIMIT ($4 -> 'acn')::INT
1780 OFFSET ($5 -> 'acn')::INT;
1782 LANGUAGE SQL STABLE;
-- unapi.holdings_xml: build the <holdings> XML payload for a bib record,
-- including copy counts (public/staff/pref-lib), and -- driven by the
-- `includes` array -- monograph parts, ranked volumes, located URIs,
-- serial subscriptions, and foreign (peer-bib) copies.
1784 CREATE OR REPLACE FUNCTION unapi.holdings_xml (
1788 depth INT DEFAULT NULL,
1789 includes TEXT[] DEFAULT NULL::TEXT[],
1790 slimit HSTORE DEFAULT NULL,
1791 soffset HSTORE DEFAULT NULL,
1792 include_xmlns BOOL DEFAULT TRUE,
1793 pref_lib INT DEFAULT NULL
1799 CASE WHEN $8 THEN 'http://open-ils.org/spec/holdings/v1' ELSE NULL END AS xmlns,
1800 CASE WHEN ('bre' = ANY ($5)) THEN 'tag:open-ils.org:U2@bre/' || $1 || '/' || $3 ELSE NULL END AS id,
1801 (SELECT record_has_holdable_copy FROM asset.record_has_holdable_copy($1)) AS has_holdable
-- copy-count summaries: public, staff, and preferred-lib views
1805 (SELECT XMLAGG(XMLELEMENT::XML) FROM (
1808 XMLATTRIBUTES('public' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1810 FROM asset.opac_ou_record_copy_count($2, $1)
1814 XMLATTRIBUTES('staff' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1816 FROM asset.staff_ou_record_copy_count($2, $1)
1820 XMLATTRIBUTES('pref_lib' as type, depth, org_unit, coalesce(transcendant,0) as transcendant, available, visible as count, unshadow)
1822 FROM asset.opac_ou_record_copy_count($9, $1)
-- optional monograph-part listing
1827 WHEN ('bmp' = ANY ($5)) THEN
1829 name monograph_parts,
1830 (SELECT XMLAGG(bmp) FROM (
1831 SELECT unapi.bmp( id, 'xml', 'monograph_part', evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'bre'), 'holdings_xml'), $3, $4, $6, $7, FALSE)
1832 FROM biblio.monograph_part
-- volumes: ranked call numbers plus located URIs (rank 0)
1840 (SELECT XMLAGG(acn ORDER BY rank, name, label_sortkey) FROM (
1842 SELECT unapi.acn(y.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), y.rank, name, label_sortkey
1843 FROM evergreen.ranked_volumes($1, $2, $4, $6, $7, $9, $5) AS y
1846 SELECT unapi.acn(uris.id,'xml','volume',evergreen.array_remove_item_by_value( evergreen.array_remove_item_by_value($5,'holdings_xml'),'bre'), $3, $4, $6, $7, FALSE), 0, name, label_sortkey
1847 FROM evergreen.located_uris($1, $2, $9) AS uris
-- optional serial subscriptions
1850 CASE WHEN ('ssub' = ANY ($5)) THEN
1853 (SELECT XMLAGG(ssub) FROM (
1854 SELECT unapi.ssub(id,'xml','subscription','{}'::TEXT[], $3, $4, $6, $7, FALSE)
1855 FROM serial.subscription
1856 WHERE record_entry = $1
-- optional foreign (multi-homed / peer-bib) copies
1860 CASE WHEN ('acp' = ANY ($5)) THEN
1862 name foreign_copies,
1863 (SELECT XMLAGG(acp) FROM (
1864 SELECT unapi.acp(p.target_copy,'xml','copy',evergreen.array_remove_item_by_value($5,'acp'), $3, $4, $6, $7, FALSE)
1865 FROM biblio.peer_bib_copy_map p
1866 JOIN asset.copy c ON (p.target_copy = c.id)
1867 WHERE NOT c.deleted AND p.peer_record = $1
1868 LIMIT ($6 -> 'acp')::INT
1869 OFFSET ($7 -> 'acp')::INT
1874 $F$ LANGUAGE SQL STABLE;
-- 0771: action/trigger hooks for barring and un-barring patrons, plus
-- validators that test the patron's current barred state.
1878 SELECT evergreen.upgrade_deps_block_check('0771', :eg_version);
1880 INSERT INTO action_trigger.hook (
1888 'A user was barred by staff',
1892 INSERT INTO action_trigger.hook (
1900 'A user was un-barred by staff',
1904 INSERT INTO action_trigger.validator (
1909 'Tests if a patron is currently marked as barred'
1912 INSERT INTO action_trigger.validator (
1917 'Tests if a patron is currently not marked as barred'
-- 0772: flag to preserve metarecord mappings when a bib is deleted, and
-- a refreshed delete-protection rule that soft-deletes bib records
-- (rewrites DELETE into an UPDATE) instead of removing rows.
1921 SELECT evergreen.upgrade_deps_block_check('0772', :eg_version);
1923 INSERT INTO config.internal_flag (name) VALUES ('ingest.metarecord_mapping.preserve_on_delete'); -- defaults to false/off
1925 DROP RULE protect_bib_rec_delete ON biblio.record_entry;
1926 CREATE RULE protect_bib_rec_delete AS
1927 ON DELETE TO biblio.record_entry DO INSTEAD (
1928 UPDATE biblio.record_entry
1930 WHERE OLD.id = biblio.record_entry.id
1935 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
1937 transformed_xml TEXT;
1940 xfrm config.xml_transform%ROWTYPE;
1942 new_attrs HSTORE := ''::HSTORE;
1943 attr_def config.record_attr_definition%ROWTYPE;
1946 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
1947 PERFORM * FROM config.internal_flag WHERE
1948 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
1950 -- One needs to keep these around to support searches
1951 -- with the #deleted modifier, so one should turn on the named
1952 -- internal flag for that functionality.
1953 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
1954 DELETE FROM metabib.record_attr WHERE id = NEW.id;
1957 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
1958 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
1959 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
1960 RETURN NEW; -- and we're done
1963 IF TG_OP = 'UPDATE' THEN -- re-ingest?
1964 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
1966 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
1971 -- Record authority linking
1972 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
1974 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
1977 -- Flatten and insert the mfr data
1978 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
1980 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
1982 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
1983 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
1985 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
1987 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
1988 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
1989 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
1990 WHERE record = NEW.id
1991 AND tag LIKE attr_def.tag
1993 WHEN attr_def.sf_list IS NOT NULL
1994 THEN POSITION(subfield IN attr_def.sf_list) > 0
2001 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
2002 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
2004 ELSIF attr_def.xpath IS NOT NULL THEN -- and xpath expression
2006 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
2008 -- See if we can skip the XSLT ... it's expensive
2009 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
2010 -- Can't skip the transform
2011 IF xfrm.xslt <> '---' THEN
2012 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
2014 transformed_xml := NEW.marc;
2017 prev_xfrm := xfrm.name;
2020 IF xfrm.name IS NULL THEN
2021 -- just grab the marcxml (empty) transform
2022 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
2023 prev_xfrm := xfrm.name;
2026 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
2028 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
2029 SELECT m.value INTO attr_value
2030 FROM biblio.marc21_physical_characteristics(NEW.id) v
2031 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
2032 WHERE v.subfield = attr_def.phys_char_sf
2033 LIMIT 1; -- Just in case ...
2037 -- apply index normalizers to attr_value
2039 SELECT n.func AS func,
2040 n.param_count AS param_count,
2042 FROM config.index_normalizer n
2043 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
2044 WHERE attr = attr_def.name
2046 EXECUTE 'SELECT ' || normalizer.func || '(' ||
2047 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
2049 WHEN normalizer.param_count > 0
2050 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
2053 ')' INTO attr_value;
2057 -- Add the new value to the hstore
2058 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
2062 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivication
2063 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
2065 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
2071 -- Gather and insert the field entry data
2072 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
2074 -- Located URI magic
2075 IF TG_OP = 'INSERT' THEN
2076 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2078 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2081 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
2083 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
2087 -- (re)map metarecord-bib linking
2088 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
2089 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
2091 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2093 ELSE -- we're doing an update, and we're not deleted, remap
2094 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
2096 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
2102 $func$ LANGUAGE PLPGSQL;
2105 -- Evergreen DB patch xxxx.data.authority_thesaurus_update.sql
2108 -- check whether patch can be applied
2109 SELECT evergreen.upgrade_deps_block_check('0773', :eg_version);
-- Seed a thesaurus row whose code is a single blank character
-- ("Alternate no attempt to code"); control_set is NULL, i.e. the
-- entry is not tied to any authority control set.
2112 INSERT INTO authority.thesaurus (code, name, control_set) VALUES
2113 (' ', oils_i18n_gettext(' ','Alternate no attempt to code','at','name'), NULL);
2117 SELECT evergreen.upgrade_deps_block_check('0774', :eg_version);
-- Per-org-unit login credentials for Z39.50 sources. One row per
-- (source, owner) pair, enforced by czsc_source_once_per_lib below.
-- NOTE(review): the username/password column definitions and the
-- closing of this CREATE TABLE fall on lines elided from this extract.
2119 CREATE TABLE config.z3950_source_credentials (
2120 id SERIAL PRIMARY KEY,
2121 owner INTEGER NOT NULL REFERENCES actor.org_unit(id),
2122 source TEXT NOT NULL REFERENCES config.z3950_source(name) ON DELETE CASCADE ON UPDATE CASCADE DEFERRABLE INITIALLY DEFERRED,
2123 -- do some Z servers require a username but no password or vice versa?
2126 CONSTRAINT czsc_source_once_per_lib UNIQUE (source, owner)
2129 -- find the most relevant set of credentials for the Z source and org
-- Given a Z39.50 source name ($1) and an org unit id ($2), return the
-- single credentials row whose owner is the deepest org unit among the
-- ancestors of $2 (ORDER BY aout.depth DESC LIMIT 1), so a branch-level
-- credential overrides a system- or consortium-level one.
2130 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_lookup
2131 (source TEXT, owner INTEGER)
2132 RETURNS config.z3950_source_credentials AS $$
2135 FROM config.z3950_source_credentials creds
2136 JOIN actor.org_unit aou ON (aou.id = creds.owner)
2137 JOIN actor.org_unit_type aout ON (aout.id = aou.ou_type)
2138 WHERE creds.source = $1 AND creds.owner IN (
2139 SELECT id FROM actor.org_unit_ancestors($2)
2141 ORDER BY aout.depth DESC LIMIT 1;
2143 $$ LANGUAGE SQL STABLE;
2145 -- since we are not exposing config.z3950_source_credentials
2146 -- via the IDL, providing a stored proc gives us a way to
2147 -- set values in the table via cstore
-- Upsert/delete helper for credentials of (src, org):
--   * existing row + both uname and passwd blank/NULL -> DELETE the row
--   * existing row + any value supplied              -> UPDATE in place
--   * no row + any value supplied                    -> INSERT a new row
-- NOTE(review): the IF FOUND / ELSE framing around these branches falls
-- on lines elided from this extract.
2148 CREATE OR REPLACE FUNCTION config.z3950_source_credentials_apply
2149 (src TEXT, org INTEGER, uname TEXT, passwd TEXT)
2152 PERFORM 1 FROM config.z3950_source_credentials
2153 WHERE owner = org AND source = src;
2156 IF COALESCE(uname, '') = '' AND COALESCE(passwd, '') = '' THEN
2157 DELETE FROM config.z3950_source_credentials
2158 WHERE owner = org AND source = src;
2160 UPDATE config.z3950_source_credentials
2161 SET username = uname, password = passwd
2162 WHERE owner = org AND source = src;
-- Only create a row when at least one credential value is non-empty.
2165 IF COALESCE(uname, '') <> '' OR COALESCE(passwd, '') <> '' THEN
2166 INSERT INTO config.z3950_source_credentials
2167 (source, owner, username, password)
2168 VALUES (src, org, uname, passwd);
2172 $$ LANGUAGE PLPGSQL;
2177 SELECT evergreen.upgrade_deps_block_check('0775', :eg_version);
-- Rebuild the config.z3950_attr -> config.z3950_source(name) foreign key
-- so it becomes DEFERRABLE INITIALLY DEFERRED (matching the FK style used
-- by config.z3950_source_credentials above).
-- NOTE(review): the referential actions (likely ON DELETE/ON UPDATE
-- CASCADE) fall on lines elided from this extract -- confirm upstream.
2179 ALTER TABLE config.z3950_attr
2180 DROP CONSTRAINT z3950_attr_source_fkey,
2181 ADD CONSTRAINT z3950_attr_source_fkey
2182 FOREIGN KEY (source)
2183 REFERENCES config.z3950_source(name)
2186 DEFERRABLE INITIALLY DEFERRED;
2189 SELECT evergreen.upgrade_deps_block_check('0776', :eg_version);
-- Flag a lineitem attribute as the one used as the order identifier;
-- defaults to FALSE for all existing rows.
2191 ALTER TABLE acq.lineitem_attr
2192 ADD COLUMN order_ident BOOLEAN NOT NULL DEFAULT FALSE;
-- Two new ACQ permissions governing lineitem identifier management.
-- NOTE(review): the literal id values for these perm_list rows fall on
-- lines elided from this extract.
2194 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2196 'ACQ_ADD_LINEITEM_IDENTIFIER',
2199 'When granted, newly added lineitem identifiers will propagate to linked bib records',
2205 INSERT INTO permission.perm_list ( id, code, description ) VALUES (
2207 'ACQ_SET_LINEITEM_IDENTIFIER',
2210 'Allows staff to change the lineitem identifier',
2217 SELECT evergreen.upgrade_deps_block_check('0777', :eg_version);
2219 -- Listed here for reference / ease of access. The update
2220 -- is not applied here (see the WHERE clause).
-- Template Toolkit (TT2) template producing the "JEDI" JSON payload for
-- EDI purchase orders (action_trigger.event_definition id = 23). The
-- entire body between "SET template =" and the WHERE clause is a single
-- string literal: do NOT add SQL comments inside it -- any added text
-- would become part of the stored template.
-- Vendor-specific behavior is keyed off the provider SAN (Baker & Taylor,
-- Brodart, Ingram, Midwest Tape, Recorded Books, ULS); the GIR section
-- collapses identical copies into quantity groups unless INC_COPY_ID is
-- set (one copy per GIR segment).
2221 UPDATE action_trigger.event_definition SET template =
2225 # extract some commonly used variables
2227 VENDOR_SAN = target.provider.san;
2228 VENDCODE = target.provider.edi_default.vendcode;
2229 VENDACCT = target.provider.edi_default.vendacct;
2230 ORG_UNIT_SAN = target.ordering_agency.mailing_address.san;
2232 # set the vendor / provider
2234 VENDOR_BT = 0; # Baker & Taylor
2237 VENDOR_MW_TAPE = 0; # Midwest Tape
2238 VENDOR_RB = 0; # Recorded Books
2239 VENDOR_ULS = 0; # ULS
2241 IF VENDOR_SAN == '1556150'; VENDOR_BT = 1;
2242 ELSIF VENDOR_SAN == '1697684'; VENDOR_BRODART = 1;
2243 ELSIF VENDOR_SAN == '1697978'; VENDOR_INGRAM = 1;
2244 ELSIF VENDOR_SAN == '2549913'; VENDOR_MW_TAPE = 1;
2245 ELSIF VENDOR_SAN == '1113984'; VENDOR_RB = 1;
2246 ELSIF VENDOR_SAN == '1699342'; VENDOR_ULS = 1;
2249 # if true, pass the PO name as a secondary identifier
2250 # RFF+LI:<name>/li_id
2256 # GIR configuration --------------------------------------
2258 INC_COPIES = 1; # copies on/off switch
2263 INC_COLLECTION_CODE = 1;
2278 INC_COLLECTION_CODE = 0;
2282 # END GIR configuration ---------------------------------
2285 [%- BLOCK big_block -%]
2287 "recipient":"[% VENDOR_SAN %]",
2288 "sender":"[% ORG_UNIT_SAN %]",
2290 "ORDERS":[ "order", {
2292 "po_number":[% target.id %],
2294 [% IF INC_PO_NAME %]
2295 "po_name":"[% target.name | replace('\/', ' ') | replace('"', '\"') %]",
2298 "date":"[% date.format(date.now, '%Y%m%d') %]",
2302 {"id-qualifier": 91, "id":"[% ORG_UNIT_SAN %] [% VENDCODE %]"}
2304 {"id":"[% ORG_UNIT_SAN %]"},
2305 {"id-qualifier": 91, "id":"[% VENDACCT %]"}
2311 {"id-qualifier": 92, "id":"[% target.provider.id %]"}
2314 "currency":"[% target.provider.currency_type %]",
2317 [%- FOR li IN target.lineitems %]
2319 "line_index":"[% li.id %]",
2323 idqual = 'EN'; # default ISBN/UPC/EAN-13
2324 ident_attr = helpers.get_li_order_ident(li.attributes);
2326 idname = ident_attr.attr_name;
2327 idval = ident_attr.attr_value;
2328 IF idname == 'isbn' AND idval.length != 13;
2330 ELSIF idname == 'issn';
2337 {"id-qualifier":"[% idqual %]","id":"[% idval %]"}
2339 "price":[% li.estimated_unit_price || '0.00' %],
2341 {"BTI":"[% helpers.get_li_attr_jedi('title', '', li.attributes) %]"},
2342 {"BPU":"[% helpers.get_li_attr_jedi('publisher', '', li.attributes) %]"},
2343 {"BPD":"[% helpers.get_li_attr_jedi('pubdate', '', li.attributes) %]"},
2344 [% IF VENDOR_ULS -%]
2345 {"BEN":"[% helpers.get_li_attr_jedi('edition', '', li.attributes) %]"},
2346 {"BAU":"[% helpers.get_li_attr_jedi('author', '', li.attributes) %]"}
2348 {"BPH":"[% helpers.get_li_attr_jedi('pagination','', li.attributes) %]"}
2352 FOR note IN li.lineitem_notes;
2353 NEXT UNLESS note.vendor_public == 't';
2354 ftx_vals.push(note.value);
2356 IF VENDOR_BRODART; # look for copy-level spec code
2357 FOR lid IN li.lineitem_details;
2359 spec_note = lid.note.match('spec code ([a-zA-Z0-9_])');
2360 IF spec_note.0; ftx_vals.push(spec_note.0); END;
2364 IF xtra_ftx; ftx_vals.unshift(xtra_ftx); END;
2366 # BT & ULS want FTX+LIN for every LI, even if empty
2367 IF ((VENDOR_BT OR VENDOR_ULS) AND ftx_vals.size == 0);
2368 ftx_vals.unshift('');
2373 [% FOR note IN ftx_vals -%] "[% note %]"[% UNLESS loop.last %], [% END %][% END %]
2376 "quantity":[% li.lineitem_details.size %],
2378 [%- IF INC_COPIES -%]
2380 [%- compressed_copies = [];
2381 FOR lid IN li.lineitem_details;
2382 fund = lid.fund.code;
2383 item_type = lid.circ_modifier;
2384 callnumber = lid.cn_label;
2385 owning_lib = lid.owning_lib.shortname;
2386 location = lid.location;
2387 collection_code = lid.collection_code;
2389 # when we have real copy data, treat it as authoritative for some fields
2390 acp = lid.eg_copy_id;
2392 item_type = acp.circ_modifier;
2393 callnumber = acp.call_number.label;
2394 location = acp.location.name;
2398 # collapse like copies into groups w/ quantity
2401 IF !INC_COPY_ID; # INC_COPY_ID implies 1 copy per GIR
2402 FOR copy IN compressed_copies;
2403 IF (fund == copy.fund OR (!fund AND !copy.fund)) AND
2404 (item_type == copy.item_type OR (!item_type AND !copy.item_type)) AND
2405 (callnumber == copy.callnumber OR (!callnumber AND !copy.callnumber)) AND
2406 (owning_lib == copy.owning_lib OR (!owning_lib AND !copy.owning_lib)) AND
2407 (location == copy.location OR (!location AND !copy.location)) AND
2408 (collection_code == copy.collection_code OR (!collection_code AND !copy.collection_code));
2410 copy.quantity = copy.quantity + 1;
2417 compressed_copies.push({
2419 item_type => item_type,
2420 callnumber => callnumber,
2421 owning_lib => owning_lib,
2422 location => location,
2423 collection_code => collection_code,
2424 copy_id => lid.id, # for INC_COPY_ID
2429 FOR copy IN compressed_copies;
2431 # If we assume owning_lib is required and set,
2432 # it is safe to prepend each following copy field w/ a ","
2434 # B&T EDI requires expected GIR fields to be
2435 # present regardless of whether a value exists.
2436 # some fields are required to have a value in ACQ,
2437 # though, so they are not forced into place below.
2439 %]{[%- IF INC_OWNING_LIB AND copy.owning_lib %] "owning_lib":"[% copy.owning_lib %]"[% END -%]
2440 [%- IF INC_FUND AND copy.fund %],"fund":"[% copy.fund %]"[% END -%]
2441 [%- IF INC_CALLNUMBER AND (VENDOR_BT OR copy.callnumber) %],"call_number":"[% copy.callnumber %]"[% END -%]
2442 [%- IF INC_ITEM_TYPE AND (VENDOR_BT OR copy.item_type) %],"item_type":"[% copy.item_type %]"[% END -%]
2443 [%- IF INC_LOCATION AND copy.location %],"copy_location":"[% copy.location %]"[% END -%]
2444 [%- IF INC_COLLECTION_CODE AND (VENDOR_BT OR copy.collection_code) %],"collection_code":"[% copy.collection_code %]"[% END -%]
2445 [%- IF INC_QUANTITY %],"quantity":"[% copy.quantity %]"[% END -%]
2446 [%- IF INC_COPY_ID %],"copy_id":"[% copy.copy_id %]" [% END %]}[% ',' UNLESS loop.last -%]
2447 [%- END -%] [%# FOR compressed_copies -%]
2449 [%- END -%] [%# IF INC_COPIES %]
2451 }[% UNLESS loop.last %],[% END -%]
2453 [% END %] [%# END lineitems %]
2455 "line_items":[% target.lineitems.size %]
2456 }] [%# close ORDERS array %]
2457 }] [%# close body array %]
2460 [% tempo = PROCESS big_block; helpers.escape_json(tempo) %]
2462 WHERE ID = 23 AND FALSE; -- remove 'AND FALSE' to apply this update
2465 -- lineitem worksheet
-- TT2 template rendering the purchase-order HTML worksheet
-- (action_trigger.event_definition id = 4). As above, everything between
-- "SET template =" and the WHERE clause is a single string literal --
-- do NOT insert SQL comments inside it. Only vendor-public notes
-- (vendor_public == 't') are rendered; subtotal is accumulated per
-- lineitem as price * detail count.
2466 UPDATE action_trigger.event_definition SET template =
2470 # find a lineitem attribute by name and optional type
2472 FOR attr IN li.attributes;
2473 IF attr.attr_name == attr_name;
2474 IF !attr_type OR attr_type == attr.attr_type;
2483 <h2>Purchase Order [% target.id %]</h2>
2485 date <b>[% date.format(date.now, '%Y%m%d') %]</b>
2489 table td { padding:5px; border:1px solid #aaa;}
2490 table { width:95%; border-collapse:collapse; }
2491 #vendor-notes { padding:5px; border:1px solid #aaa; }
2493 <table id='vendor-table'>
2495 <td valign='top'>Vendor</td>
2497 <div>[% target.provider.name %]</div>
2498 <div>[% target.provider.addresses.0.street1 %]</div>
2499 <div>[% target.provider.addresses.0.street2 %]</div>
2500 <div>[% target.provider.addresses.0.city %]</div>
2501 <div>[% target.provider.addresses.0.state %]</div>
2502 <div>[% target.provider.addresses.0.country %]</div>
2503 <div>[% target.provider.addresses.0.post_code %]</div>
2505 <td valign='top'>Ship to / Bill to</td>
2507 <div>[% target.ordering_agency.name %]</div>
2508 <div>[% target.ordering_agency.billing_address.street1 %]</div>
2509 <div>[% target.ordering_agency.billing_address.street2 %]</div>
2510 <div>[% target.ordering_agency.billing_address.city %]</div>
2511 <div>[% target.ordering_agency.billing_address.state %]</div>
2512 <div>[% target.ordering_agency.billing_address.country %]</div>
2513 <div>[% target.ordering_agency.billing_address.post_code %]</div>
2519 <fieldset id='vendor-notes'>
2520 <legend>Notes to the Vendor</legend>
2522 [% FOR note IN target.notes %]
2523 [% IF note.vendor_public == 't' %]
2524 <li>[% note.value %]</li>
2535 <th>ISBN or Item #</th>
2546 [% FOR li IN target.lineitems %]
2549 [% count = li.lineitem_details.size %]
2550 [% price = li.estimated_unit_price %]
2551 [% litotal = (price * count) %]
2552 [% subtotal = subtotal + litotal %]
2554 ident_attr = helpers.get_li_order_ident(li.attributes);
2555 SET ident_value = ident_attr.attr_value IF ident_attr;
2557 <td>[% target.id %]</td>
2558 <td>[% ident_value %]</td>
2559 <td>[% PROCESS get_li_attr attr_name = 'title' %]</td>
2560 <td>[% count %]</td>
2561 <td>[% price %]</td>
2562 <td>[% litotal %]</td>
2565 [% FOR note IN li.lineitem_notes %]
2566 [% IF note.vendor_public == 't' %]
2567 <li>[% note.value %]</li>
2575 <td/><td/><td/><td/>
2577 <td>[% subtotal %]</td>
2584 Total Line Item Count: [% target.lineitems.size %]
2586 WHERE ID = 4; -- PO HTML
2589 SELECT evergreen.upgrade_deps_block_check('0778', :eg_version);
-- Return the set of values matching an XPath against a record's MARC.
-- Args: $1 = table name holding a 'marc' column, $2 = record id,
--       $3 = XPath expression, $4 = optional regexp stripped from each
--       value (grounded by the acq.lineitem wrapper call below).
-- NOTE(review): declared IMMUTABLE although it reads table data via
-- oils_xpath_table -- presumably deliberate for planner reasons; confirm
-- against upstream before changing.
2591 CREATE OR REPLACE FUNCTION extract_marc_field_set
2592 (TEXT, BIGINT, TEXT, TEXT) RETURNS SETOF TEXT AS $$
2600 FROM oils_xpath_table(
2601 'id', 'marc', $1, $3, 'id = ' || $2)
2602 AS t(id int, t text))x
-- Optionally scrub every value with the supplied removal regexp.
2604 IF $4 IS NOT NULL THEN
2605 SELECT INTO output (SELECT regexp_replace(output, $4, '', 'g'));
2611 $$ LANGUAGE PLPGSQL IMMUTABLE;
-- Thin wrapper fixing the source table to acq.lineitem:
-- $1 = lineitem id, $2 = XPath, $3 = removal regexp.
2614 CREATE OR REPLACE FUNCTION
2615 public.extract_acq_marc_field_set ( BIGINT, TEXT, TEXT)
2616 RETURNS SETOF TEXT AS $$
2617 SELECT extract_marc_field_set('acq.lineitem', $1, $2, $3);
-- Trigger function (re)building acq.lineitem_attr rows from a lineitem's
-- MARC, driven by the rows of acq.lineitem_attr_definition and its child
-- tables (provider / marc / generated variants).
2621 CREATE OR REPLACE FUNCTION public.ingest_acq_marc ( ) RETURNS TRIGGER AS $function$
2630 FOR adef IN SELECT *,tableoid FROM acq.lineitem_attr_definition LOOP
-- tableoid tells us which child definition table this row came from.
2632 SELECT relname::TEXT INTO atype FROM pg_class WHERE oid = adef.tableoid;
2634 IF (atype NOT IN ('lineitem_usr_attr_definition','lineitem_local_attr_definition')) THEN
-- Provider-specific definitions only apply to this lineitem's provider.
2635 IF (atype = 'lineitem_provider_attr_definition') THEN
2636 SELECT provider INTO prov FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
2637 CONTINUE WHEN NEW.provider IS NULL OR prov <> NEW.provider;
2640 IF (atype = 'lineitem_provider_attr_definition') THEN
2641 SELECT xpath INTO xpath_string FROM acq.lineitem_provider_attr_definition WHERE id = adef.id;
2642 ELSIF (atype = 'lineitem_marc_attr_definition') THEN
2643 SELECT xpath INTO xpath_string FROM acq.lineitem_marc_attr_definition WHERE id = adef.id;
2644 ELSIF (atype = 'lineitem_generated_attr_definition') THEN
2645 SELECT xpath INTO xpath_string FROM acq.lineitem_generated_attr_definition WHERE id = adef.id;
-- Strip a trailing /text() (or //text()) step; node selection is
-- handled by the extraction helpers below.
2648 xpath_string := REGEXP_REPLACE(xpath_string,$re$//?text\(\)$$re$,'');
2650 IF (adef.code = 'title' OR adef.code = 'author') THEN
2651 -- title and author should not be split
2652 -- FIXME: once oils_xpath can grok XPATH 2.0 functions, we can use
2653 -- string-join in the xpath and remove this special case
2654 SELECT extract_acq_marc_field(id, xpath_string, adef.remove) INTO value FROM acq.lineitem WHERE id = NEW.id;
2655 IF (value IS NOT NULL AND value <> '') THEN
2656 INSERT INTO acq.lineitem_attr (lineitem, definition, attr_type, attr_name, attr_value)
2657 VALUES (NEW.id, adef.id, atype, adef.code, value);
2662 -- each application of the regex may produce multiple values
2664 SELECT * FROM extract_acq_marc_field_set(
2665 NEW.id, xpath_string || '[' || pos || ']', adef.remove)
2668 IF (value IS NOT NULL AND value <> '') THEN
2669 INSERT INTO acq.lineitem_attr
2670 (lineitem, definition, attr_type, attr_name, attr_value)
2671 VALUES (NEW.id, adef.id, atype, adef.code, value);
2689 $function$ LANGUAGE PLPGSQL;
2692 SELECT evergreen.upgrade_deps_block_check('0779', :eg_version);
-- Named groupings of Vandelay "trash field" definitions, owned per org
-- unit; (owner, label) pairs are unique.
2694 CREATE TABLE vandelay.import_bib_trash_group(
2695 id SERIAL PRIMARY KEY,
2696 owner INT NOT NULL REFERENCES actor.org_unit(id),
2697 label TEXT NOT NULL, --i18n
2698 always_apply BOOLEAN NOT NULL DEFAULT FALSE,
2699 CONSTRAINT vand_import_bib_trash_grp_owner_label UNIQUE (owner, label)
2702 -- otherwise, the ALTER TABLE statement below
2703 -- will fail with pending trigger events.
2704 SET CONSTRAINTS ALL IMMEDIATE;
2706 ALTER TABLE vandelay.import_bib_trash_fields
2707 -- allow NULL for now; rows are backfilled and the column made NOT NULL below
2708 ADD COLUMN grp INTEGER REFERENCES vandelay.import_bib_trash_group;
2710 -- add any existing trash_fields to "Legacy" groups (one per unique field
2711 -- owner) as part of the upgrade, since grp is now required.
2712 -- note that vandelay.import_bib_trash_fields was never used before,
2713 -- so in most cases this should be a no-op.
2715 INSERT INTO vandelay.import_bib_trash_group (owner, label)
2716 SELECT DISTINCT(owner), 'Legacy' FROM vandelay.import_bib_trash_fields;
2718 UPDATE vandelay.import_bib_trash_fields field SET grp = tgroup.id
2719 FROM vandelay.import_bib_trash_group tgroup
2720 WHERE tgroup.owner = field.owner;
2722 ALTER TABLE vandelay.import_bib_trash_fields
2723 -- now that all rows have values, we can make this non-null
2724 ALTER COLUMN grp SET NOT NULL,
2725 -- drop outdated constraint
2726 DROP CONSTRAINT vand_import_bib_trash_fields_idx,
2727 -- owner is implied by the grp
2729 -- make grp+field unique
2730 ADD CONSTRAINT vand_import_bib_trash_fields_once_per UNIQUE (grp, field);
2733 SELECT evergreen.upgrade_deps_block_check('0780', :eg_version);
-- Extend distribution formula entries with optional fund, circ modifier,
-- and collection code; all three are nullable so existing rows are valid.
-- The new fund column is what acq.rollover_funds_by_org_tree (below) can
-- repoint at new-year funds during fiscal rollover.
2735 ALTER TABLE acq.distribution_formula_entry
2736 ADD COLUMN fund INT REFERENCES acq.fund (id),
2737 ADD COLUMN circ_modifier TEXT REFERENCES config.circ_modifier (code),
2738 ADD COLUMN collection_code TEXT ;
2741 -- support option to roll distribution formula funds
-- Fiscal-year rollover: for each propagable fund under org_unit_id
-- (optionally including descendants), create the new-year fund if needed,
-- transfer or void the remaining balance, move encumbrances, optionally
-- repoint distribution formula entries, and deactivate the old fund.
-- encumb_only requires the acq.fund.allow_rollover_without_money setting.
2742 CREATE OR REPLACE FUNCTION acq.rollover_funds_by_org_tree(
2745 org_unit_id INTEGER,
2746 encumb_only BOOL DEFAULT FALSE,
2747 include_desc BOOL DEFAULT TRUE
2748 ) RETURNS VOID AS $$
2752 new_year INT := old_year + 1;
2755 xfer_amount NUMERIC := 0;
2759 roll_distrib_forms BOOL;
-- Argument validation ------------------------------------------------
2765 IF old_year IS NULL THEN
2766 RAISE EXCEPTION 'Input year argument is NULL';
2767 ELSIF old_year NOT BETWEEN 2008 and 2200 THEN
2768 RAISE EXCEPTION 'Input year is out of range';
2771 IF user_id IS NULL THEN
2772 RAISE EXCEPTION 'Input user id argument is NULL';
2775 IF org_unit_id IS NULL THEN
2776 RAISE EXCEPTION 'Org unit id argument is NULL';
2779 -- Validate the org unit
2784 WHERE id = org_unit_id;
2786 IF org_found IS NULL THEN
2787 RAISE EXCEPTION 'Org unit id % is invalid', org_unit_id;
2788 ELSIF encumb_only THEN
2789 SELECT INTO perm_ous value::BOOL FROM
2790 actor.org_unit_ancestor_setting(
2791 'acq.fund.allow_rollover_without_money', org_unit_id
2793 IF NOT FOUND OR NOT perm_ous THEN
2794 RAISE EXCEPTION 'Encumbrance-only rollover not permitted at org %', org_unit_id;
2799 -- Loop over the propagable funds to identify the details
2800 -- from the old fund plus the id of the new one, if it exists.
2804 oldf.id AS old_fund,
2810 newf.id AS new_fund_id
2813 LEFT JOIN acq.fund AS newf
2814 ON ( oldf.code = newf.code )
2816 oldf.year = old_year
2818 AND newf.year = new_year
2819 AND ( ( include_desc AND oldf.org IN ( SELECT id FROM actor.org_unit_descendants( org_unit_id ) ) )
2820 OR (NOT include_desc AND oldf.org = org_unit_id ) )
2822 --RAISE NOTICE 'Processing fund %', roll_fund.old_fund;
2824 IF roll_fund.new_fund_id IS NULL THEN
2826 -- The old fund hasn't been propagated yet. Propagate it now.
2828 INSERT INTO acq.fund (
2836 balance_warning_percent,
2837 balance_stop_percent
2842 roll_fund.currency_type,
2846 roll_fund.balance_warning_percent,
2847 roll_fund.balance_stop_percent
2849 RETURNING id INTO new_fund;
2851 new_fund = roll_fund.new_fund_id;
2854 -- Determine the amount to transfer
2858 FROM acq.fund_spent_balance
2859 WHERE fund = roll_fund.old_fund;
2861 IF xfer_amount <> 0 THEN
2862 IF NOT encumb_only AND roll_fund.rollover THEN
2864 -- Transfer balance from old fund to new
2866 --RAISE NOTICE 'Transferring % from fund % to %', xfer_amount, roll_fund.old_fund, new_fund;
2868 PERFORM acq.transfer_fund(
2878 -- Transfer balance from old fund to the void
2880 -- RAISE NOTICE 'Transferring % from fund % to the void', xfer_amount, roll_fund.old_fund;
2882 PERFORM acq.transfer_fund(
2888 'Rollover into the void'
2893 IF roll_fund.rollover THEN
2895 -- Move any lineitems from the old fund to the new one
2896 -- where the associated debit is an encumbrance.
2898 -- Any other tables tying expenditure details to funds should
2899 -- receive similar treatment. At this writing there are none.
2901 UPDATE acq.lineitem_detail
2904 fund = roll_fund.old_fund -- this condition may be redundant
2910 fund = roll_fund.old_fund
2914 -- Move encumbrance debits from the old fund to the new fund
2916 UPDATE acq.fund_debit
2919 fund = roll_fund.old_fund
2923 -- Rollover distribution formulae funds
2924 SELECT INTO roll_distrib_forms value::BOOL FROM
2925 actor.org_unit_ancestor_setting(
2926 'acq.fund.rollover_distrib_forms', org_unit_id
2929 IF roll_distrib_forms THEN
2930 UPDATE acq.distribution_formula_entry
-- Use the new_fund variable, not roll_fund.new_fund_id: when the
-- new-year fund was created just above, new_fund_id is NULL and
-- would have nulled out these entries instead of repointing them.
2931 SET fund = new_fund
2932 WHERE fund = roll_fund.old_fund;
2936 -- Mark old fund as inactive, now that we've closed it
2940 WHERE id = roll_fund.old_fund;
2943 $$ LANGUAGE plpgsql;
2947 SELECT evergreen.upgrade_deps_block_check('0781', :eg_version);
-- Org-unit setting read by acq.rollover_funds_by_org_tree to decide
-- whether fiscal rollover also repoints distribution formula entry
-- funds at the new-year funds.
2949 INSERT INTO config.org_unit_setting_type
2950 (name, label, description, grp, datatype)
2952 'acq.fund.rollover_distrib_forms',
2954 'acq.fund.rollover_distrib_forms',
2955 'Rollover Distribution Formulae Funds',
2960 'acq.fund.rollover_distrib_forms',
-- "formulae" fixed here: was misspelled "formalae", inconsistent with
-- the correctly spelled label two lines above.
2961 'During fiscal rollover, update distribution formulae to use new funds',
2970 -- No transaction needed. This can be run on a live, production server.
2971 SELECT evergreen.upgrade_deps_block_check('0782', :eg_version);
-- The CREATE INDEX CONCURRENTLY statements below are intentionally left
-- commented out: CONCURRENTLY cannot run inside a transaction block, so
-- they are applied by a separate supplemental script instead.
2973 /* ** Handled by the supplemental script ** */
2974 -- On a heavily used system, user activity lookup is painful. This is used
2975 -- on the patron display in the staff client.
2977 -- Measured speed increase: ~2s -> .01s
2978 -- CREATE INDEX CONCURRENTLY usr_activity_usr_idx on actor.usr_activity (usr);
2980 -- Finding open holds, often as a subquery within larger hold-related logic,
2981 -- can be sped up with the following.
2983 -- Measured speed increase: ~3s -> .02s
2984 -- CREATE INDEX CONCURRENTLY hold_request_open_idx on action.hold_request (id) where cancel_time IS NULL AND fulfillment_time IS NULL;
2986 -- Hold queue position is a particularly difficult thing to calculate
2987 -- efficiently. Recent changes in the query structure now allow some
2988 -- optimization via indexing. These do that.
2990 -- Measured speed increase: ~6s -> ~0.4s
2991 -- CREATE INDEX CONCURRENTLY cp_available_by_circ_lib_idx on asset.copy (circ_lib) where status IN (0,7);
2992 -- CREATE INDEX CONCURRENTLY hold_request_current_copy_before_cap_idx on action.hold_request (current_copy) where capture_time IS NULL AND cancel_time IS NULL;
2994 -- After heavy use, fetching EDI messages becomes time consuming. The following
2995 -- index addresses that for large-data environments.
2997 -- Measured speed increase: ~3s -> .1s
2998 -- CREATE INDEX CONCURRENTLY edi_message_account_status_idx on acq.edi_message (account,status);
3000 -- After heavy use, fetching POs becomes time consuming. The following
3001 -- index addresses that for large-data environments.
3003 -- Measured speed increase: ~1.5s -> .1s
3004 -- CREATE INDEX CONCURRENTLY edi_message_po_idx on acq.edi_message (purchase_order);
3006 -- Related to EDI messages, fetching of certain A/T events benefits from specific
3007 -- indexing. This index is more general than necessary for the observed query
3008 -- but ends up speeding several other (already relatively fast) queries.
3010 -- Measured speed increase: ~2s -> .06s
3011 -- CREATE INDEX CONCURRENTLY atev_def_state on action_trigger.event (event_def,state);
3013 -- Retrieval of hold transit by hold id (for transit completion or cancellation)
3014 -- is slow in some query formulations.
3016 -- Measured speed increase: ~.5s -> .1s
3017 -- CREATE INDEX CONCURRENTLY hold_transit_copy_hold_idx on action.hold_transit_copy (hold);
3020 SELECT evergreen.upgrade_deps_block_check('0785', :eg_version);
3022 DROP INDEX actor.prox_adj_once_idx;
-- Recreate the uniqueness index over COALESCE'd expressions so that NULL
-- ("any") values also participate in uniqueness -- two rows differing
-- only in NULL placement would otherwise both be allowed.
-- NOTE(review): the final indexed column(s) and the closing paren of
-- this CREATE INDEX fall on lines elided from this extract.
3024 CREATE UNIQUE INDEX prox_adj_once_idx ON actor.org_unit_proximity_adjustment (
3025 COALESCE(item_circ_lib, -1),
3026 COALESCE(item_owning_lib, -1),
3027 COALESCE(copy_location, -1),
3028 COALESCE(hold_pickup_lib, -1),
3029 COALESCE(hold_request_lib, -1),
3030 COALESCE(circ_mod, ''),
3035 --Check if we can apply the upgrade.
3036 SELECT evergreen.upgrade_deps_block_check('0786', :eg_version);
-- Composite types consumed by search.query_parser_fts (declared next in
-- this script): search_result is the per-hit row it returns (SETOF), and
-- search_args describes a parsed search term.
3039 CREATE TYPE search.search_result AS ( id BIGINT, rel NUMERIC, record INT, total INT, checked INT, visible INT, deleted INT, excluded INT );
3040 CREATE TYPE search.search_args AS ( id INT, field_class TEXT, field_name TEXT, table_alias TEXT, term TEXT, term_type TEXT );
3042 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3044 param_search_ou INT,
3047 param_statuses INT[],
3048 param_locations INT[],
3054 param_pref_ou INT DEFAULT NULL
3055 ) RETURNS SETOF search.search_result AS $func$
3058 current_res search.search_result%ROWTYPE;
3059 search_org_list INT[];
3060 luri_org_list INT[];
3069 core_cursor REFCURSOR;
3070 core_rel_query TEXT;
3072 total_count INT := 0;
3073 check_count INT := 0;
3074 deleted_count INT := 0;
3075 visible_count INT := 0;
3076 excluded_count INT := 0;
3080 check_limit := COALESCE( param_check, 1000 );
3081 core_limit := COALESCE( param_limit, 25000 );
3082 core_offset := COALESCE( param_offset, 0 );
3084 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
3086 IF param_search_ou > 0 THEN
3087 IF param_depth IS NOT NULL THEN
3088 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3090 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
3093 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
3095 ELSIF param_search_ou < 0 THEN
3096 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3098 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3099 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3100 luri_org_list := luri_org_list || tmp_int_list;
3103 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3105 ELSIF param_search_ou = 0 THEN
3106 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
3109 IF param_pref_ou IS NOT NULL THEN
3110 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3111 luri_org_list := luri_org_list || tmp_int_list;
3114 OPEN core_cursor FOR EXECUTE param_query;
3118 FETCH core_cursor INTO core_result;
3119 EXIT WHEN NOT FOUND;
3120 EXIT WHEN total_count >= core_limit;
3122 total_count := total_count + 1;
3124 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3126 check_count := check_count + 1;
3128 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3130 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3131 deleted_count := deleted_count + 1;
3136 FROM biblio.record_entry b
3137 JOIN config.bib_source s ON (b.source = s.id)
3138 WHERE s.transcendant
3139 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3142 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3143 visible_count := visible_count + 1;
3145 current_res.id = core_result.id;
3146 current_res.rel = core_result.rel;
3150 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3154 current_res.record = core_result.records[1];
3156 current_res.record = NULL;
3159 RETURN NEXT current_res;
3165 FROM asset.call_number cn
3166 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3167 JOIN asset.uri uri ON (map.uri = uri.id)
3168 WHERE NOT cn.deleted
3169 AND cn.label = '##URI##'
3171 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3172 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3173 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3177 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3178 visible_count := visible_count + 1;
3180 current_res.id = core_result.id;
3181 current_res.rel = core_result.rel;
3185 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3189 current_res.record = core_result.records[1];
3191 current_res.record = NULL;
3194 RETURN NEXT current_res;
3199 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3202 FROM asset.call_number cn
3203 JOIN asset.copy cp ON (cp.call_number = cn.id)
3204 WHERE NOT cn.deleted
3206 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3207 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3208 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3213 FROM biblio.peer_bib_copy_map pr
3214 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3215 WHERE NOT cp.deleted
3216 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3217 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3218 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3222 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3223 excluded_count := excluded_count + 1;
3230 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3233 FROM asset.call_number cn
3234 JOIN asset.copy cp ON (cp.call_number = cn.id)
3235 WHERE NOT cn.deleted
3237 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3238 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3239 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3244 FROM biblio.peer_bib_copy_map pr
3245 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3246 WHERE NOT cp.deleted
3247 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3248 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3249 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3253 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3254 excluded_count := excluded_count + 1;
3261 IF staff IS NULL OR NOT staff THEN
3264 FROM asset.opac_visible_copies
3265 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3266 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3271 FROM biblio.peer_bib_copy_map pr
3272 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3273 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3274 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3279 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3280 excluded_count := excluded_count + 1;
3288 FROM asset.call_number cn
3289 JOIN asset.copy cp ON (cp.call_number = cn.id)
3290 WHERE NOT cn.deleted
3292 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3293 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3299 FROM biblio.peer_bib_copy_map pr
3300 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3301 WHERE NOT cp.deleted
3302 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3303 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3309 FROM asset.call_number cn
3310 JOIN asset.copy cp ON (cp.call_number = cn.id)
3311 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3316 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3317 excluded_count := excluded_count + 1;
3326 visible_count := visible_count + 1;
3328 current_res.id = core_result.id;
3329 current_res.rel = core_result.rel;
3333 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3337 current_res.record = core_result.records[1];
3339 current_res.record = NULL;
3342 RETURN NEXT current_res;
3344 IF visible_count % 1000 = 0 THEN
3345 -- RAISE NOTICE ' % visible so far ... ', visible_count;
3350 current_res.id = NULL;
3351 current_res.rel = NULL;
3352 current_res.record = NULL;
3353 current_res.total = total_count;
3354 current_res.checked = check_count;
3355 current_res.deleted = deleted_count;
3356 current_res.visible = visible_count;
3357 current_res.excluded = excluded_count;
3361 RETURN NEXT current_res;
3364 $func$ LANGUAGE PLPGSQL;
-- ---------------------------------------------------------------------------
-- Upgrade 0788: rebuild reporter.old_super_simple_record so publisher and
-- pubdate can come from MARC 264 (RDA) as well as 260 (AACR2), then refresh
-- the affected rows of reporter.materialized_simple_record.
-- NOTE(review): the embedded line numbers jump (3371 -> 3377, 3394 -> 3397),
-- so parts of the view's SELECT list and the DELETE/INSERT statements are
-- elided in this excerpt; do not treat the text below as the complete DDL/DML.
-- ---------------------------------------------------------------------------
3368 SELECT evergreen.upgrade_deps_block_check('0788', :eg_version);
3370 -- New view including 264 as a potential tag for publisher and pubdate
3371 CREATE OR REPLACE VIEW reporter.old_super_simple_record AS
-- FIRST()/ARRAY_ACCUM() are Evergreen-defined aggregates (not core Postgres);
-- each LEFT JOIN below pulls one flattened MARC field out of metabib.full_rec.
3377 FIRST(title.value) AS title,
3378 FIRST(author.value) AS author,
3379 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT publisher.value), ', ') AS publisher,
-- pubdate keeps only the leading digit run of 260/264 $c (strips brackets, "c", etc.)
3380 ARRAY_TO_STRING(ARRAY_ACCUM( DISTINCT SUBSTRING(pubdate.value FROM $$\d+$$) ), ', ') AS pubdate,
-- isbn: first whitespace-delimited token of 020/024 $a or $z, hyphens removed
3381 ARRAY_ACCUM( DISTINCT REPLACE(SUBSTRING(isbn.value FROM $$^\S+$$), '-', '') ) AS isbn,
-- issn: normalize "NNNN-NNNN" (or trailing x) into "NNNN NNNN"
3382 ARRAY_ACCUM( DISTINCT REGEXP_REPLACE(issn.value, E'^\\S*(\\d{4})[-\\s](\\d{3,4}x?)', E'\\1 \\2') ) AS issn
3383 FROM biblio.record_entry r
3384 LEFT JOIN metabib.full_rec title ON (r.id = title.record AND title.tag = '245' AND title.subfield = 'a')
3385 LEFT JOIN metabib.full_rec author ON (r.id = author.record AND author.tag IN ('100','110','111') AND author.subfield = 'a')
-- 264 ind2 = '1' means "publication" in RDA; only that flavor feeds publisher/pubdate
3386 LEFT JOIN metabib.full_rec publisher ON (r.id = publisher.record AND (publisher.tag = '260' OR (publisher.tag = '264' AND publisher.ind2 = '1')) AND publisher.subfield = 'b')
3387 LEFT JOIN metabib.full_rec pubdate ON (r.id = pubdate.record AND (pubdate.tag = '260' OR (pubdate.tag = '264' AND pubdate.ind2 = '1')) AND pubdate.subfield = 'c')
3388 LEFT JOIN metabib.full_rec isbn ON (r.id = isbn.record AND isbn.tag IN ('024', '020') AND isbn.subfield IN ('a','z'))
3389 LEFT JOIN metabib.full_rec issn ON (r.id = issn.record AND issn.tag = '022' AND issn.subfield = 'a')
3392 -- Update reporter.materialized_simple_record with new 264-based values for publisher and pubdate
-- Only records that actually have 264 $b/$c data are deleted and re-materialized,
-- keeping the refresh proportional to the affected rows rather than the whole table.
3393 DELETE FROM reporter.materialized_simple_record WHERE id IN (
3394 SELECT DISTINCT record FROM metabib.full_rec WHERE tag = '264' AND subfield IN ('b', 'c')
3397 INSERT INTO reporter.materialized_simple_record
3398 SELECT DISTINCT rossr.* FROM reporter.old_super_simple_record rossr INNER JOIN metabib.full_rec mfr ON mfr.record = rossr.id
3399 WHERE mfr.tag = '264' AND mfr.subfield IN ('b', 'c')
-- ---------------------------------------------------------------------------
-- Upgrades 0789/0790: mark the 'subject' search class as a combined-index
-- class (new config.metabib_class.combined flag, default FALSE).
-- ---------------------------------------------------------------------------
3402 SELECT evergreen.upgrade_deps_block_check('0789', :eg_version);
3403 SELECT evergreen.upgrade_deps_block_check('0790', :eg_version);
3405 ALTER TABLE config.metabib_class ADD COLUMN combined BOOL NOT NULL DEFAULT FALSE;
3406 UPDATE config.metabib_class SET combined = TRUE WHERE name = 'subject';
3409 --Check if we can apply the upgrade.
3410 SELECT evergreen.upgrade_deps_block_check('0791', :eg_version);
-- ---------------------------------------------------------------------------
-- search.query_parser_fts (upgrade 0791)
--
-- Core staff/OPAC search driver: runs the pre-built core query (param_query)
-- through a cursor and post-filters each candidate (meta)record for
-- visibility — deleted status, transcendant bib sources, located URIs,
-- copy status / location filters, and OPAC copy visibility — emitting one
-- search.search_result row per visible hit plus a final summary row with
-- the total/checked/deleted/visible/excluded counters.
--
-- NOTE(review): this excerpt elides many interior lines (the embedded line
-- numbers jump, e.g. 3428 -> 3431, 3534 -> 3540); several SELECT lists,
-- END IF/END LOOP lines and parameter declarations are not shown here.
-- The commentary below describes only what is visible.
-- ---------------------------------------------------------------------------
3414 CREATE OR REPLACE FUNCTION search.query_parser_fts (
3416 param_search_ou INT,
3419 param_statuses INT[],
3420 param_locations INT[],
3426 deleted_search BOOL,
-- param_pref_ou (new in this version): preferred OU whose ancestors are
-- added to the located-URI org list below.
3427 param_pref_ou INT DEFAULT NULL
3428 ) RETURNS SETOF search.search_result AS $func$
3431 current_res search.search_result%ROWTYPE;
3432 search_org_list INT[];
3433 luri_org_list INT[];
3442 core_cursor REFCURSOR;
3443 core_rel_query TEXT;
-- Result-set accounting, reported in the trailing summary row.
3445 total_count INT := 0;
3446 check_count INT := 0;
3447 deleted_count INT := 0;
3448 visible_count INT := 0;
3449 excluded_count INT := 0;
-- Defaults: inspect at most 1000 rows per call, hard cap of 25000 core hits.
3453 check_limit := COALESCE( param_check, 1000 );
3454 core_limit := COALESCE( param_limit, 25000 );
3455 core_offset := COALESCE( param_offset, 0 );
3457 -- core_skip_chk := COALESCE( param_skip_chk, 1 );
-- Build search_org_list (where copies may live) and luri_org_list (orgs
-- whose located URIs count) from the requested org unit:
--   > 0 : a real org unit — descendants for copies, ancestors for URIs
3459 IF param_search_ou > 0 THEN
3460 IF param_depth IS NOT NULL THEN
3461 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou, param_depth );
3463 SELECT array_accum(distinct id) INTO search_org_list FROM actor.org_unit_descendants( param_search_ou );
3466 SELECT array_accum(distinct id) INTO luri_org_list FROM actor.org_unit_ancestors( param_search_ou );
--   < 0 : negated ID names an org lasso; union the ancestors of every member
3468 ELSIF param_search_ou < 0 THEN
3469 SELECT array_accum(distinct org_unit) INTO search_org_list FROM actor.org_lasso_map WHERE lasso = -param_search_ou;
3471 FOR tmp_int IN SELECT * FROM UNNEST(search_org_list) LOOP
3472 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors( tmp_int );
3473 luri_org_list := luri_org_list || tmp_int_list;
3476 SELECT array_accum(DISTINCT x.id) INTO luri_org_list FROM UNNEST(luri_org_list) x(id);
3478 ELSIF param_search_ou = 0 THEN
3479 -- reserved for user lassos (ou_buckets/type='lasso') with ID passed in depth ... hack? sure.
-- Fold the preferred OU's ancestors into the located-URI org list as well.
3482 IF param_pref_ou IS NOT NULL THEN
3483 SELECT array_accum(distinct id) INTO tmp_int_list FROM actor.org_unit_ancestors(param_pref_ou);
3484 luri_org_list := luri_org_list || tmp_int_list;
-- param_query is trusted, pre-built SQL from the query parser; EXECUTE is
-- intentional here (not user-supplied text).
3487 OPEN core_cursor FOR EXECUTE param_query;
-- Main loop: walk core results until the limit is hit or rows run out.
3491 FETCH core_cursor INTO core_result;
3492 EXIT WHEN NOT FOUND;
3493 EXIT WHEN total_count >= core_limit;
3495 total_count := total_count + 1;
-- Skip rows outside the requested offset/check window without checking them.
3497 CONTINUE WHEN total_count NOT BETWEEN core_offset + 1 AND check_limit + core_offset;
3499 check_count := check_count + 1;
-- Filter 1: unless this is a #deleted search, drop hits whose source bibs
-- are all deleted.
3501 IF NOT deleted_search THEN
3503 PERFORM 1 FROM biblio.record_entry b WHERE NOT b.deleted AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3505 -- RAISE NOTICE ' % were all deleted ... ', core_result.records;
3506 deleted_count := deleted_count + 1;
-- Filter 2: transcendant bib sources are always visible — emit immediately.
3511 FROM biblio.record_entry b
3512 JOIN config.bib_source s ON (b.source = s.id)
3513 WHERE s.transcendant
3514 AND b.id IN ( SELECT * FROM unnest( core_result.records ) );
3517 -- RAISE NOTICE ' % were all transcendant ... ', core_result.records;
3518 visible_count := visible_count + 1;
3520 current_res.id = core_result.id;
3521 current_res.rel = core_result.rel;
-- Metarecord source count decides whether to report a single bib record.
3525 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3529 current_res.record = core_result.records[1];
3531 current_res.record = NULL;
3534 RETURN NEXT current_res;
-- Filter 3: a located URI ("##URI##" call number) owned by an org in
-- luri_org_list makes the record visible when no status/location filters
-- were requested — emit immediately.
3540 FROM asset.call_number cn
3541 JOIN asset.uri_call_number_map map ON (map.call_number = cn.id)
3542 JOIN asset.uri uri ON (map.uri = uri.id)
3543 WHERE NOT cn.deleted
3544 AND cn.label = '##URI##'
3546 AND ( param_locations IS NULL OR array_upper(param_locations, 1) IS NULL )
3547 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3548 AND cn.owning_lib IN ( SELECT * FROM unnest( luri_org_list ) )
3552 -- RAISE NOTICE ' % have at least one URI ... ', core_result.records;
3553 visible_count := visible_count + 1;
3555 current_res.id = core_result.id;
3556 current_res.rel = core_result.rel;
3560 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3564 current_res.record = core_result.records[1];
3566 current_res.record = NULL;
3569 RETURN NEXT current_res;
-- Filter 4: copy-status filter. Checks both directly-attached copies and
-- multi-homed (peer_bib_copy_map) copies; if neither matches, exclude.
3574 IF param_statuses IS NOT NULL AND array_upper(param_statuses, 1) > 0 THEN
3577 FROM asset.call_number cn
3578 JOIN asset.copy cp ON (cp.call_number = cn.id)
3579 WHERE NOT cn.deleted
3581 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3582 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3583 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3588 FROM biblio.peer_bib_copy_map pr
3589 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3590 WHERE NOT cp.deleted
3591 AND cp.status IN ( SELECT * FROM unnest( param_statuses ) )
3592 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3593 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3597 -- RAISE NOTICE ' % and multi-home linked records were all status-excluded ... ', core_result.records;
3598 excluded_count := excluded_count + 1;
-- Filter 5: copy-location (shelving location) filter, same two-pass shape
-- as the status filter above.
3605 IF param_locations IS NOT NULL AND array_upper(param_locations, 1) > 0 THEN
3608 FROM asset.call_number cn
3609 JOIN asset.copy cp ON (cp.call_number = cn.id)
3610 WHERE NOT cn.deleted
3612 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3613 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3614 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3619 FROM biblio.peer_bib_copy_map pr
3620 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3621 WHERE NOT cp.deleted
3622 AND cp.location IN ( SELECT * FROM unnest( param_locations ) )
3623 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3624 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3628 -- RAISE NOTICE ' % and multi-home linked records were all copy_location-excluded ... ', core_result.records;
3629 excluded_count := excluded_count + 1;
-- Filter 6a (patron/OPAC path): require at least one row in the
-- asset.opac_visible_copies materialization (direct or multi-homed).
3636 IF staff IS NULL OR NOT staff THEN
3639 FROM asset.opac_visible_copies
3640 WHERE circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3641 AND record IN ( SELECT * FROM unnest( core_result.records ) )
3646 FROM biblio.peer_bib_copy_map pr
3647 JOIN asset.opac_visible_copies cp ON (cp.copy_id = pr.target_copy)
3648 WHERE cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3649 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3654 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3655 excluded_count := excluded_count + 1;
-- Filter 6b (staff path): staff may also see records with in-scope copies
-- regardless of OPAC visibility; the fallback query at 3684 appears to
-- check for copies anywhere on the record — elided lines make the exact
-- branch conditions unverifiable from this excerpt.
3663 FROM asset.call_number cn
3664 JOIN asset.copy cp ON (cp.call_number = cn.id)
3665 WHERE NOT cn.deleted
3667 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3668 AND cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3674 FROM biblio.peer_bib_copy_map pr
3675 JOIN asset.copy cp ON (cp.id = pr.target_copy)
3676 WHERE NOT cp.deleted
3677 AND cp.circ_lib IN ( SELECT * FROM unnest( search_org_list ) )
3678 AND pr.peer_record IN ( SELECT * FROM unnest( core_result.records ) )
3684 FROM asset.call_number cn
3685 JOIN asset.copy cp ON (cp.call_number = cn.id)
3686 WHERE cn.record IN ( SELECT * FROM unnest( core_result.records ) )
3691 -- RAISE NOTICE ' % and multi-home linked records were all visibility-excluded ... ', core_result.records;
3692 excluded_count := excluded_count + 1;
-- Survived every filter: emit as a visible result.
3703 visible_count := visible_count + 1;
3705 current_res.id = core_result.id;
3706 current_res.rel = core_result.rel;
3710 SELECT COUNT(DISTINCT s.source) INTO tmp_int FROM metabib.metarecord_source_map s WHERE s.metarecord = core_result.id;
3714 current_res.record = core_result.records[1];
3716 current_res.record = NULL;
3719 RETURN NEXT current_res;
3721 IF visible_count % 1000 = 0 THEN
3722 -- RAISE NOTICE ' % visible so far ... ', visible_count;
-- Trailing summary row: NULL id/rel/record, counters populated.
3727 current_res.id = NULL;
3728 current_res.rel = NULL;
3729 current_res.record = NULL;
3730 current_res.total = total_count;
3731 current_res.checked = check_count;
3732 current_res.deleted = deleted_count;
3733 current_res.visible = visible_count;
3734 current_res.excluded = excluded_count;
3738 RETURN NEXT current_res;
3741 $func$ LANGUAGE PLPGSQL;
3744 -- AFTER UPDATE OR INSERT trigger for biblio.record_entry
-- Re-ingests a bib record into Evergreen's search/browse indexes whenever its
-- MARC changes (or on insert), and tears the index rows down on delete.
-- Behavior is tuned by rows in config.internal_flag (checked via PERFORM +
-- FOUND throughout). NOTE(review): the embedded line numbers jump, so some
-- END IF / ELSE lines and declarations are elided in this excerpt.
3745 CREATE OR REPLACE FUNCTION biblio.indexing_ingest_or_delete () RETURNS TRIGGER AS $func$
3747 transformed_xml TEXT;
3750 xfrm config.xml_transform%ROWTYPE;
-- Accumulates name=>value pairs for metabib.record_attr.attrs.
3752 new_attrs HSTORE := ''::HSTORE;
3753 attr_def config.record_attr_definition%ROWTYPE;
-- === Deletion path: strip index/link rows and return early ===
3756 IF NEW.deleted IS TRUE THEN -- If this bib is deleted
3757 PERFORM * FROM config.internal_flag WHERE
3758 name = 'ingest.metarecord_mapping.preserve_on_delete' AND enabled;
3760 -- One needs to keep these around to support searches
3761 -- with the #deleted modifier, so one should turn on the named
3762 -- internal flag for that functionality.
3763 DELETE FROM metabib.metarecord_source_map WHERE source = NEW.id;
3764 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3767 DELETE FROM authority.bib_linking WHERE bib = NEW.id; -- Avoid updating fields in bibs that are no longer visible
3768 DELETE FROM biblio.peer_bib_copy_map WHERE peer_record = NEW.id; -- Separate any multi-homed items
3769 DELETE FROM metabib.browse_entry_def_map WHERE source = NEW.id; -- Don't auto-suggest deleted bibs
3770 RETURN NEW; -- and we're done
-- === Update path: short-circuit when the MARC is unchanged ===
3773 IF TG_OP = 'UPDATE' THEN -- re-ingest?
3774 PERFORM * FROM config.internal_flag WHERE name = 'ingest.reingest.force_on_same_marc' AND enabled;
3776 IF NOT FOUND AND OLD.marc = NEW.marc THEN -- don't do anything if the MARC didn't change
3781 -- Record authority linking
3782 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_authority_linking' AND enabled;
3784 PERFORM biblio.map_authority_linking( NEW.id, NEW.marc );
3787 -- Flatten and insert the mfr data
3788 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_full_rec' AND enabled;
3790 PERFORM metabib.reingest_metabib_full_rec(NEW.id);
3792 -- Now we pull out attribute data, which is dependent on the mfr for all but XPath-based fields
3793 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_metabib_rec_descriptor' AND enabled;
-- One pass per attribute definition; ORDER BY format groups same-transform
-- xpath attributes together so the prev_xfrm cache below can skip XSLT runs.
3795 FOR attr_def IN SELECT * FROM config.record_attr_definition ORDER BY format LOOP
-- Source 1 of 4: flattened MARC tag (with optional subfield list).
3797 IF attr_def.tag IS NOT NULL THEN -- tag (and optional subfield list) selection
3798 SELECT ARRAY_TO_STRING(ARRAY_ACCUM(value), COALESCE(attr_def.joiner,' ')) INTO attr_value
3799 FROM (SELECT * FROM metabib.full_rec ORDER BY tag, subfield) AS x
3800 WHERE record = NEW.id
3801 AND tag LIKE attr_def.tag
3803 WHEN attr_def.sf_list IS NOT NULL
3804 THEN POSITION(subfield IN attr_def.sf_list) > 0
-- Source 2 of 4: MARC fixed field.
3811 ELSIF attr_def.fixed_field IS NOT NULL THEN -- a named fixed field, see config.marc21_ff_pos_map.fixed_field
3812 attr_value := biblio.marc21_extract_fixed_field(NEW.id, attr_def.fixed_field);
-- Source 3 of 4: XPath over (optionally XSLT-transformed) MARCXML.
3814 ELSIF attr_def.xpath IS NOT NULL THEN -- and xpath expression
3816 SELECT INTO xfrm * FROM config.xml_transform WHERE name = attr_def.format;
3818 -- See if we can skip the XSLT ... it's expensive
3819 IF prev_xfrm IS NULL OR prev_xfrm <> xfrm.name THEN
3820 -- Can't skip the transform
3821 IF xfrm.xslt <> '---' THEN
3822 transformed_xml := oils_xslt_process(NEW.marc,xfrm.xslt);
3824 transformed_xml := NEW.marc;
3827 prev_xfrm := xfrm.name;
3830 IF xfrm.name IS NULL THEN
3831 -- just grab the marcxml (empty) transform
3832 SELECT INTO xfrm * FROM config.xml_transform WHERE xslt = '---' LIMIT 1;
3833 prev_xfrm := xfrm.name;
3836 attr_value := oils_xpath_string(attr_def.xpath, transformed_xml, COALESCE(attr_def.joiner,' '), ARRAY[ARRAY[xfrm.prefix, xfrm.namespace_uri]]);
-- Source 4 of 4: MARC21 physical-characteristic subfield.
3838 ELSIF attr_def.phys_char_sf IS NOT NULL THEN -- a named Physical Characteristic, see config.marc21_physical_characteristic_*_map
3839 SELECT m.value INTO attr_value
3840 FROM biblio.marc21_physical_characteristics(NEW.id) v
3841 JOIN config.marc21_physical_characteristic_value_map m ON (m.id = v.value)
3842 WHERE v.subfield = attr_def.phys_char_sf
3843 LIMIT 1; -- Just in case ...
3847 -- apply index normalizers to attr_value
3849 SELECT n.func AS func,
3850 n.param_count AS param_count,
3852 FROM config.index_normalizer n
3853 JOIN config.record_attr_index_norm_map m ON (m.norm = n.id)
3854 WHERE attr = attr_def.name
-- Dynamic call: normalizer.func comes from config (admin-controlled), the
-- value is quote_literal-escaped; params are stored JSON-ish and rewritten
-- into a SQL argument list here.
3856 EXECUTE 'SELECT ' || normalizer.func || '(' ||
3857 COALESCE( quote_literal( attr_value ), 'NULL' ) ||
3859 WHEN normalizer.param_count > 0
3860 THEN ',' || REPLACE(REPLACE(BTRIM(normalizer.params,'[]'),E'\'',E'\\\''),E'"',E'\'')
3863 ')' INTO attr_value;
3867 -- Add the new value to the hstore
3868 new_attrs := new_attrs || hstore( attr_def.name, attr_value );
-- Persist the accumulated attributes: delete+insert on first ingest or
-- un-delete, plain update otherwise.
3872 IF TG_OP = 'INSERT' OR OLD.deleted THEN -- initial insert OR revivication
3873 DELETE FROM metabib.record_attr WHERE id = NEW.id;
3874 INSERT INTO metabib.record_attr (id, attrs) VALUES (NEW.id, new_attrs);
3876 UPDATE metabib.record_attr SET attrs = new_attrs WHERE id = NEW.id;
3882 -- Gather and insert the field entry data
3883 PERFORM metabib.reingest_metabib_field_entries(NEW.id);
3885 -- Located URI magic
-- Both INSERT and UPDATE branches honor the same disable flag before
-- extracting located URIs (856-based) from the MARC.
3886 IF TG_OP = 'INSERT' THEN
3887 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3889 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3892 PERFORM * FROM config.internal_flag WHERE name = 'ingest.disable_located_uri' AND enabled;
3894 PERFORM biblio.extract_located_uris( NEW.id, NEW.marc, NEW.editor );
3898 -- (re)map metarecord-bib linking
3899 IF TG_OP = 'INSERT' THEN -- if not deleted and performing an insert, check for the flag
3900 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_insert' AND enabled;
3902 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3904 ELSE -- we're doing an update, and we're not deleted, remap
3905 PERFORM * FROM config.internal_flag WHERE name = 'ingest.metarecord_mapping.skip_on_update' AND enabled;
3907 PERFORM metabib.remap_metarecord_for_bib( NEW.id, NEW.fingerprint );
3913 $func$ LANGUAGE PLPGSQL;
-- ---------------------------------------------------------------------------
-- Upgrade 0792: give permission 544 its real code (it was seeded with the
-- numeric placeholder '544'). The "AND code = '544'" guard makes this a
-- no-op on databases already fixed.
-- ---------------------------------------------------------------------------
3915 SELECT evergreen.upgrade_deps_block_check('0792', :eg_version);
3917 UPDATE permission.perm_list SET code = 'URL_VERIFY_UPDATE_SETTINGS' WHERE id = 544 AND code = '544';
-- ---------------------------------------------------------------------------
-- Upgrade 0793: adjust the three stock best-hold-order configurations.
-- Each UPDATE is guarded by a WHERE on the stock name (plus further
-- conditions elided in this excerpt) so locally customized rows are left
-- alone. NOTE(review): the SET clauses are not visible here — the embedded
-- line numbers jump from the UPDATE line to the WHERE line.
-- ---------------------------------------------------------------------------
3920 SELECT evergreen.upgrade_deps_block_check('0793', :eg_version);
3922 UPDATE config.best_hold_order
3933 WHERE name = 'Traditional' AND
3941 UPDATE config.best_hold_order
3952 WHERE name = 'Traditional with Holds-always-go-home' AND
3962 UPDATE config.best_hold_order
3973 WHERE name = 'Traditional with Holds-go-home' AND
3986 -- These are from 0789, and can and should be run outside of a transaction
-- One text-search configuration per search class, all cloning the
-- english_nostop configuration (stopwords retained) introduced for the
-- native-FTS migration earlier in this script.
3987 CREATE TEXT SEARCH CONFIGURATION title ( COPY = english_nostop );
3988 CREATE TEXT SEARCH CONFIGURATION author ( COPY = english_nostop );
3989 CREATE TEXT SEARCH CONFIGURATION subject ( COPY = english_nostop );
3990 CREATE TEXT SEARCH CONFIGURATION series ( COPY = english_nostop );
3991 CREATE TEXT SEARCH CONFIGURATION identifier ( COPY = english_nostop );
-- Final operator instruction: the supplemental shell script completes the
-- 2.3 -> 2.4 upgrade and must be run after this file.
3993 \qecho Please run Open-ILS/src/sql/Pg/version-upgrade/2.3-2.4-supplemental.sh now, which contains additional required SQL to complete your Evergreen upgrade!