package C4::Matcher;
-# Copyright (C) 2007 LibLime
+# Copyright (C) 2007 LibLime, 2012 C & P Bibliography Services
#
# This file is part of Koha.
#
-# Koha is free software; you can redistribute it and/or modify it under the
-# terms of the GNU General Public License as published by the Free Software
-# Foundation; either version 2 of the License, or (at your option) any later
-# version.
+# Koha is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
#
-# Koha is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+# Koha is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License along
-# with Koha; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License
+# along with Koha; if not, see <http://www.gnu.org/licenses>.
-use strict;
-use warnings;
+use Modern::Perl;
-use C4::Context;
-use MARC::Record;
-use C4::Search;
-use C4::Biblio;
-use vars qw($VERSION);
-
-BEGIN {
- # set the version for version checking
- $VERSION = 3.01;
-}
+use Koha::SearchEngine;
+use Koha::SearchEngine::Search;
+use Koha::SearchEngine::QueryBuilder;
+use Koha::Util::Normalize qw(
+ ISBN
+ legacy_default
+ lower_case
+ remove_spaces
+ upper_case
+);
=head1 NAME
$matcher->add_matchpoint('isbn', 1000, [ { tag => '020', subfields => 'a', norms => [] } ]);
$matcher->add_simple_required_check('245', 'a', -1, 0, '', '245', 'a', -1, 0, '');
- $matcher->add_required_check([ { tag => '245', subfields => 'a', norms => [] } ],
+ $matcher->add_required_check([ { tag => '245', subfields => 'a', norms => [] } ],
[ { tag => '245', subfields => 'a', norms => [] } ]);
my @matches = $matcher->get_matches($marc_record, $max_matches);
return @results;
}
+=head2 GetMatcherId
+
+  my $matcher_id = C4::Matcher::GetMatcherId($code);
+
+Returns the matcher_id of a code.
+
+=cut
+
+sub GetMatcherId {
+ my ($code) = @_;
+ my $dbh = C4::Context->dbh;
+
+ # Look up the numeric primary key for the matcher whose human-readable
+ # code matches $code; selectrow_array yields undef when no row matches,
+ # so callers receive undef for an unknown code.
+ my $matcher_id = $dbh->selectrow_array("SELECT matcher_id FROM marc_matchers WHERE code = ?", undef, $code);
+ return $matcher_id;
+}
+
=head1 METHODS
=head2 new
$sth->execute($id);
my $row = $sth->fetchrow_hashref;
$sth->finish();
- return undef unless defined $row;
+ return unless defined $row;
my $self = {};
$self->{'id'} = $row->{'matcher_id'};
my $matcher_id = $self->{'id'};
$sth = $dbh->prepare_cached("INSERT INTO matchpoints (matcher_id, search_index, score)
VALUES (?, ?, ?)");
- $sth->execute($matcher_id, $matchpoint->{'index'}, $matchpoint->{'score'});
+ $sth->execute($matcher_id, $matchpoint->{'index'}, $matchpoint->{'score'}||0);
my $matchpoint_id = $dbh->{'mysql_insertid'};
my $seqnum = 0;
foreach my $component (@{ $matchpoint->{'components'} }) {
$seqnum++;
$sth = $dbh->prepare_cached("INSERT INTO matchpoint_components
- (matchpoint_id, sequence, tag, subfields, offset, length)
+ (matchpoint_id, sequence, tag, subfields, `offset`, length)
VALUES (?, ?, ?, ?, ?, ?)");
$sth->bind_param(1, $matchpoint_id);
$sth->bind_param(2, $seqnum);
$sth->bind_param(3, $component->{'tag'});
$sth->bind_param(4, join "", sort keys %{ $component->{'subfields'} });
- $sth->bind_param(5, $component->{'offset'});
+ $sth->bind_param(5, $component->{'offset'}||0);
$sth->bind_param(6, $component->{'length'});
$sth->execute();
my $matchpoint_component_id = $dbh->{'mysql_insertid'};
$sth->execute($matcher_id); # relying on cascading deletes to clean up everything
}
+=head2 record_type
+
+ $matcher->record_type('biblio');
+ my $record_type = $matcher->record_type();
+
+Accessor method.
+
+=cut
+
+sub record_type {
+ my $self = shift;
+ # Combined getter/setter: with an argument, store it as the record type
+ # and return the new value; without, return the current value.
+ # Expected values appear to be 'biblio' or 'authority' -- confirm against
+ # the get_matches() dispatch.
+ @_ ? $self->{'record_type'} = shift : $self->{'record_type'};
+}
+
=head2 threshold
$matcher->threshold(1000);
);
}
-=head2 find_matches
+=head2 get_matches
my @matches = $matcher->get_matches($marc_record, $max_matches);
foreach $match (@matches) {
my $self = shift;
my ($source_record, $max_matches) = @_;
- my %matches = ();
+ my $matches = {};
+
+ foreach my $matchpoint ( @{ $self->{'matchpoints'} } ) {
+ my @source_keys = _get_match_keys( $source_record, $matchpoint );
- foreach my $matchpoint (@{ $self->{'matchpoints'} }) {
- my @source_keys = _get_match_keys($source_record, $matchpoint);
next if scalar(@source_keys) == 0;
+
+ @source_keys = C4::Koha::GetVariationsOfISBNs(@source_keys)
+ if ( $matchpoint->{index} =~ /^isbn$/i
+ && C4::Context->preference('AggressiveMatchOnISBN') );
+
+ @source_keys = C4::Koha::GetVariationsOfISSNs(@source_keys)
+ if ( $matchpoint->{index} =~ /^issn$/i
+ && C4::Context->preference('AggressiveMatchOnISSN') );
+
# build query
- my $query = join(" or ", map { "$matchpoint->{'index'}=$_" } @source_keys);
- # FIXME only searching biblio index at the moment
- my ($error, $searchresults, $total_hits) = SimpleSearch($query, 0, $max_matches);
-
- if (defined $error ) {
- warn "search failed ($query) $error";
- } else {
- foreach my $matched (@{$searchresults}) {
- $matches{$matched} += $matchpoint->{'score'};
+ my $query;
+ my $error;
+ my $searchresults;
+ my $total_hits;
+ if ( $self->{'record_type'} eq 'biblio' ) {
+
+ my $phr = ( C4::Context->preference('AggressiveMatchOnISBN') || C4::Context->preference('AggressiveMatchOnISSN') ) ? ',phr' : q{};
+ $query = join( " OR ",
+ map { "$matchpoint->{'index'}$phr=\"$_\"" } @source_keys );
+ #NOTE: double-quote the values so you don't get a "Embedded truncation not supported" error when a term has a ? in it.
+
+ # Use state variables to avoid recreating the objects every time.
+ # With Elasticsearch this also avoids creating a massive amount of
+ # ES connectors that would eventually run out of file descriptors.
+ state $searcher = Koha::SearchEngine::Search->new({index => $Koha::SearchEngine::BIBLIOS_INDEX});
+ ( $error, $searchresults, $total_hits ) =
+ $searcher->simple_search_compat( $query, 0, $max_matches, undef, skip_normalize => 1 );
+
+ if ( defined $error ) {
+ warn "search failed ($query) $error";
+ }
+ else {
+ foreach my $matched ( @{$searchresults} ) {
+ my $target_record = C4::Search::new_record_from_zebra( 'biblioserver', $matched );
+ my ( $biblionumber_tag, $biblionumber_subfield ) = C4::Biblio::GetMarcFromKohaField( "biblio.biblionumber" );
+ my $id = ( $biblionumber_tag > 10 ) ?
+ $target_record->field($biblionumber_tag)->subfield($biblionumber_subfield) :
+ $target_record->field($biblionumber_tag)->data();
+ $matches->{$id}->{score} += $matchpoint->{score};
+ $matches->{$id}->{record} = $target_record;
+ }
+ }
+
+ }
+ elsif ( $self->{'record_type'} eq 'authority' ) {
+ my @marclist;
+ my @and_or;
+ my @excluding = [];
+ my @operator;
+ my @value;
+ foreach my $key (@source_keys) {
+ push @marclist, $matchpoint->{'index'};
+ push @and_or, 'or';
+ push @operator, 'exact';
+ push @value, $key;
+ }
+ # Use state variables to avoid recreating the objects every time.
+ # With Elasticsearch this also avoids creating a massive amount of
+ # ES connectors that would eventually run out of file descriptors.
+ state $builder = Koha::SearchEngine::QueryBuilder->new({index => $Koha::SearchEngine::AUTHORITIES_INDEX});
+ state $searcher = Koha::SearchEngine::Search->new({index => $Koha::SearchEngine::AUTHORITIES_INDEX});
+ my $search_query = $builder->build_authorities_query_compat(
+ \@marclist, \@and_or, \@excluding, \@operator,
+ \@value, undef, 'AuthidAsc'
+ );
+ my ( $authresults, $total ) = $searcher->search_auth_compat( $search_query, 0, 20 );
+
+ foreach my $result (@$authresults) {
+ my $id = $result->{authid};
+ $matches->{$id}->{score} += $matchpoint->{'score'};
+ $matches->{$id}->{record} = $id;
}
}
}
# get rid of any that don't meet the threshold
- %matches = map { ($matches{$_} >= $self->{'threshold'}) ? ($_ => $matches{$_}) : () } keys %matches;
-
- # get rid of any that don't meet the required checks
- %matches = map { _passes_required_checks($source_record, $_, $self->{'required_checks'}) ? ($_ => $matches{$_}) : () }
- keys %matches;
+ $matches = { map { ($matches->{$_}->{score} >= $self->{'threshold'}) ? ($_ => $matches->{$_}) : () } keys %$matches };
my @results = ();
- foreach my $marcblob (keys %matches) {
- my $target_record = MARC::Record->new_from_usmarc($marcblob);
- my $result = TransformMarcToKoha(C4::Context->dbh, $target_record, '');
- # FIXME - again, bibliospecific
- # also, can search engine be induced to give just the number in the first place?
- my $record_number = $result->{'biblionumber'};
- push @results, { 'record_id' => $record_number, 'score' => $matches{$marcblob} };
+ if ($self->{'record_type'} eq 'biblio') {
+ require C4::Biblio;
+ # get rid of any that don't meet the required checks
+ $matches = {
+ map {
+ _passes_required_checks( $source_record, $matches->{$_}->{'record'}, $self->{'required_checks'} )
+ ? ( $_ => $matches->{$_} )
+ : ()
+ } keys %$matches
+ };
+
+ foreach my $id ( keys %$matches ) {
+ push @results, {
+ record_id => $id,
+ score => $matches->{$id}->{score}
+ };
+ }
+ } elsif ($self->{'record_type'} eq 'authority') {
+ require C4::AuthoritiesMarc;
+ foreach my $id (keys %$matches) {
+ push @results, {
+ record_id => $id,
+ score => $matches->{$id}->{score}
+ };
+ }
}
- @results = sort { $b->{'score'} cmp $a->{'score'} } @results;
+ @results = sort {
+ $b->{'score'} cmp $a->{'score'} or
+ $b->{'record_id'} cmp $a->{'record_id'}
+ } @results;
if (scalar(@results) > $max_matches) {
@results = @results[0..$max_matches-1];
}
return @results;
-
}
=head2 dump
$result->{'matcher_id'} = $self->{'id'};
$result->{'code'} = $self->{'code'};
$result->{'description'} = $self->{'description'};
+ $result->{'record_type'} = $self->{'record_type'};
$result->{'matchpoints'} = [];
foreach my $matchpoint (@{ $self->{'matchpoints'} }) {
}
sub _passes_required_checks {
- my ($source_record, $target_blob, $matchchecks) = @_;
- my $target_record = MARC::Record->new_from_usmarc($target_blob); # FIXME -- need to avoid parsing record twice
+ my ($source_record, $target_record, $matchchecks) = @_;
# no checks supplied == automatic pass
return 1 if $#{ $matchchecks } == -1;
}
sub _get_match_keys {
+
my $source_record = shift;
my $matchpoint = shift;
my $check_only_first_repeat = @_ ? shift : 0;
# If there are two 003s and two 001s, there will be two keys:
# first 003 + first 001
# second 003 + second 001
-
+
my @keys = ();
for (my $i = 0; $i <= $#{ $matchpoint->{'components'} }; $i++) {
my $component = $matchpoint->{'components'}->[$i];
my $j = -1;
- FIELD: foreach my $field ($source_record->field($component->{'tag'})) {
+
+ my @fields = ();
+ my $tag = $component->{'tag'};
+ if ($tag && $tag eq 'LDR'){
+ $fields[0] = $source_record->leader();
+ }
+ else {
+ @fields = $source_record->field($tag);
+ }
+
+ FIELD: foreach my $field (@fields) {
$j++;
last FIELD if $j > 0 and $check_only_first_repeat;
last FIELD if $i > 0 and $j > $#keys;
- my $key = "";
- my $string;
- if ($field->is_control_field()) {
- $string=$field->data();
+
+ my $string;
+ if ( ! ref $field ){
+ $string = "$field";
+ }
+ elsif ( $field->is_control_field() ) {
+ $string = $field->data();
+ } elsif ( defined $component->{subfields} && keys %{$component->{subfields}} ){
+ $string = $field->as_string(
+ join('', keys %{ $component->{ subfields } }), ' ' # ' ' as separator
+ );
} else {
- foreach my $subfield ($field->subfields()) {
- if (exists $component->{'subfields'}->{$subfield->[0]}) {
- $string .= " " . $subfield->[1];
- }
- }
- }
+ $string = $field->as_string();
+ }
+
if ($component->{'length'}>0) {
- $string= substr($string, $component->{'offset'}, $component->{'length'});
- # FIXME normalize, substr
+ $string= substr($string, $component->{'offset'}, $component->{'length'});
} elsif ($component->{'offset'}) {
- $string= substr($string, $component->{'offset'});
+ $string= substr($string, $component->{'offset'});
}
- $key = _normalize($string);
+
+ my $norms = $component->{'norms'};
+ my $key = $string;
+
+ foreach my $norm ( @{ $norms } ) {
+ if ( grep { $norm eq $_ } valid_normalization_routines() ) {
+ if ( $norm eq 'remove_spaces' ) {
+ $key = remove_spaces($key);
+ }
+ elsif ( $norm eq 'upper_case' ) {
+ $key = upper_case($key);
+ }
+ elsif ( $norm eq 'lower_case' ) {
+ $key = lower_case($key);
+ }
+ elsif ( $norm eq 'legacy_default' ) {
+ $key = legacy_default($key);
+ }
+ elsif ( $norm eq 'ISBN' ) {
+ $key = ISBN($key);
+ }
+ } else {
+ warn "Invalid normalization routine required ($norm)"
+ unless $norm eq 'none';
+ }
+ }
+
if ($i == 0) {
push @keys, $key if $key;
} else {
return $component;
}
-# FIXME - default normalizer
-sub _normalize {
- my $value = uc shift;
- $value =~ s/[.;:,\]\[\)\(\/'"]//g;
- $value =~ s/^\s+//;
- #$value =~ s/^\s+$//;
- $value =~ s/\s+$//;
- $value =~ s/\s+/ /g;
- #$value =~ s/[.;,\]\[\)\(\/"']//g;
- return $value;
+=head2 valid_normalization_routines
+
+  my @routines = C4::Matcher::valid_normalization_routines();
+
+Returns the list of normalization routine names that a matchpoint
+component's 'norms' entries may name; any other value (except 'none')
+triggers a warning when match keys are built.
+
+=cut
+
+sub valid_normalization_routines {
+
+ # These names correspond to the Koha::Util::Normalize functions
+ # imported at the top of this module.
+ return (
+ 'remove_spaces',
+ 'upper_case',
+ 'lower_case',
+ 'legacy_default',
+ 'ISBN'
+ );
}
1;