use C4::Context;
use C4::Biblio;
use C4::Koha;
+use C4::Debug;
use C4::Charset;
use C4::Items;
use Unicode::Normalize;
use Time::HiRes qw(gettimeofday);
use Getopt::Long;
use IO::File;
+use Pod::Usage;
-binmode(STDOUT, ":utf8");
+binmode STDOUT, ':encoding(UTF-8)';
my ( $input_marc_file, $number, $offset) = ('',0,0);
my ($version, $delete, $test_parameter, $skip_marc8_conversion, $char_encoding, $verbose, $commit, $fk_off,$format,$biblios,$authorities,$keepids,$match, $isbn_check, $logfile);
-my ($sourcetag,$sourcesubfield,$idmapfl);
+my $cleanisbn = 1;
+my ($sourcetag,$sourcesubfield,$idmapfl, $dedup_barcode);
$|=1;
'x:s' => \$sourcetag,
'y:s' => \$sourcesubfield,
'idmap:s' => \$idmapfl,
+ 'cleanisbn!' => \$cleanisbn,
+ 'dedupbarcode' => \$dedup_barcode,
);
$biblios=!$authorities||$biblios;
if ($version || ($input_marc_file eq '')) {
- print <<EOF
-Small script to import bibliographic records into Koha.
-
-Parameters:
- h this version/help screen
- file /path/to/file/to/dump: the file to import
- v verbose mode. 1 means "some infos", 2 means "MARC dumping"
- fk Turn off foreign key checks during import.
- n the number of records to import. If missing, all the file is imported
- o file offset before importing, ie number of records to skip.
- commit the number of records to wait before performing a 'commit' operation
- l file logs actions done for each record and their status into file
- t test mode: parses the file, saying what he would do, but doing nothing.
- s skip automatic conversion of MARC-8 to UTF-8. This option is
- provided for debugging.
- c the characteristic MARC flavour. At the moment, only MARC21 and
- UNIMARC are supported. MARC21 by default.
- d delete EVERYTHING related to biblio in koha-DB before import. Tables:
- biblio, biblioitems, titems
- m format, MARCXML or ISO2709 (defaults to ISO2709)
- keepids field store ids in field (usefull for authorities, where 001 contains the authid for Koha, that can contain a very valuable info for authorities coming from LOC or BNF. useless for biblios probably)
- b|biblios type of import : bibliographic records
- a|authorities type of import : authority records
- match matchindex,fieldtomatch matchpoint to use to deduplicate
- fieldtomatch can be either 001 to 999
- or field and list of subfields as such 100abcde
- i|isbn if set, a search will be done on isbn, and, if the same isbn is found, the biblio is not added. It's another
- method to deduplicate.
- match & i can be both set.
- x source bib tag for reporting the source bib number
- y source subfield for reporting the source bib number
- idmap file for the koha bib and source id
- keepids store ids in 009 (usefull for authorities, where 001 contains the authid for Koha, that can contain a very valuable info for authorities coming from LOC or BNF. useless for biblios probably)
- b|biblios type of import : bibliographic records
- a|authorities type of import : authority records
- match matchindex,fieldtomatch matchpoint to use to deduplicate
- fieldtomatch can be either 001 to 999
- or field and list of subfields as such 100abcde
- i|isbn if set, a search will be done on isbn, and, if the same isbn is found, the biblio is not added. It's another
- method to deduplicate.
- match & i can be both set.
-IMPORTANT: don't use this script before you've entered and checked your MARC
- parameters tables twice (or more!). Otherwise, the import won't work
- correctly and you will get invalid data.
-
-SAMPLE:
- \$ export KOHA_CONF=/etc/koha.conf
- \$ perl misc/migration_tools/bulkmarcimport.pl -d -commit 1000 \\
- -file /home/jmf/koha.mrc -n 3000
-EOF
-;#'
-exit;
+ pod2usage( -verbose => 2 );
+ exit;
}
if (defined $idmapfl) {
# get records
eval { $record = $batch->next() };
if ( $@ ) {
- print "Bad MARC record: skipped\n";
+ print "Bad MARC record $i: $@ skipped\n";
# FIXME - because MARC::Batch->next() combines grabbing the next
# blob and parsing it into one operation, a correctable condition
# such as a MARC-8 record claiming that it's UTF-8 can't be recovered
}
my $isbn;
# remove trailing - in isbn (only for biblios, of course)
- if ($biblios) {
- if ($marcFlavour eq 'UNIMARC') {
- if (my $f010 = $record->field('010')) {
- $isbn = $f010->subfield('a');
- $isbn =~ s/-//g;
- $f010->update('a' => $isbn);
- }
- } else {
- if (my $f020 = $record->field('020')) {
- $isbn = $f020->subfield('a');
- $isbn =~ s/-//g;
- $f020->update('a' => $isbn);
- }
+ if ($biblios && $cleanisbn) {
+ my $tag = $marcFlavour eq 'UNIMARC' ? '010' : '020';
+ my $field = $record->field($tag);
+ my $isbn = $field && $field->subfield('a');
+ if ( $isbn ) {
+ $isbn =~ s/-//g;
+ $field->update('a' => $isbn);
}
}
my $id;
my $server=($authorities?'authorityserver':'biblioserver');
my ($error, $results,$totalhits)=C4::Search::SimpleSearch( $query, 0, 3, [$server] );
die "unable to search the database for duplicates : $error" if (defined $error);
- warn "$query $server : $totalhits";
- if ($results && scalar(@$results)==1){
+ #warn "$query $server : $totalhits";
+ if ( @{$results} == 1 ){
my $marcrecord = MARC::File::USMARC::decode($results->[0]);
$id=GetRecordId($marcrecord,$tagid,$subfieldid);
}
- elsif ($results && scalar(@$results)>1){
- warn "more than one match for $query";
+ elsif ( @{$results} > 1){
+ $debug && warn "more than one match for $query";
}
else {
- warn "nomatch for $query";
+ $debug && warn "nomatch for $query";
}
}
my $originalid;
printlog({id=>$id||$originalid||$biblionumber, op=>"insert",status=>"ok"}) if ($logfile);
}
eval { ( $itemnumbers_ref, $errors_ref ) = AddItemBatchFromMarc( $record, $biblionumber, $biblioitemnumber, '' ); };
- if ( $@ ) {
- warn "ERROR: Adding items to bib $biblionumber failed: $@\n";
+ my $error_adding = $@;
+ # Work on a clone so that if there are real errors, we can maybe
+ # fix them up later.
+ my $clone_record = $record->clone();
+ C4::Biblio::_strip_item_fields($clone_record, '');
+ # This sets the marc fields if there was an error, and also calls
+ # defer_marc_save.
+ ModBiblioMarc( $clone_record, $biblionumber, '' );
+ if ( $error_adding ) {
+ warn "ERROR: Adding items to bib $biblionumber failed: $error_adding";
printlog({id=>$id||$originalid||$biblionumber, op=>"insertitem",status=>"ERROR"}) if ($logfile);
# if we failed because of an exception, assume that
# the MARC columns in biblioitems were not set.
- ModBiblioMarc( $record, $biblionumber, '' );
next RECORD;
- }
+ }
else{
printlog({id=>$id||$originalid||$biblionumber, op=>"insert",status=>"ok"}) if ($logfile);
}
- if ($#{ $errors_ref } > -1) {
+ if ($dedup_barcode && grep { exists $_->{error_code} && $_->{error_code} eq 'duplicate_barcode' } @$errors_ref) {
+ # Find the record called 'barcode'
+ my ($tag, $sub) = C4::Biblio::GetMarcFromKohaField('items.barcode', '');
+ # Now remove any items that didn't have a duplicate_barcode error,
+ # erase the barcodes on items that did, and re-add those items.
+ my %dupes;
+ foreach my $i (0 .. $#{$errors_ref}) {
+ my $ref = $errors_ref->[$i];
+ if ($ref && ($ref->{error_code} eq 'duplicate_barcode')) {
+ $dupes{$ref->{item_sequence}} = 1;
+ # Delete the error message because we're going to
+ # retry this one.
+ delete $errors_ref->[$i];
+ }
+ }
+ my $seq = 0;
+ foreach my $field ($record->field($tag)) {
+ $seq++;
+ if ($dupes{$seq}) {
+ # Here we remove the barcode
+ $field->delete_subfield(code => $sub);
+ } else {
+ # otherwise we delete the field because we don't want
+ # two of them
+ $record->delete_fields($field);
+ }
+ }
+ # Now re-add the record as before, adding errors to the prev list
+ my $more_errors;
+ eval { ( $itemnumbers_ref, $more_errors ) = AddItemBatchFromMarc( $record, $biblionumber, $biblioitemnumber, '' ); };
+ if ( $@ ) {
+ warn "ERROR: Adding items to bib $biblionumber failed: $@\n";
+ printlog({id=>$id||$originalid||$biblionumber, op=>"insertitem",status=>"ERROR"}) if ($logfile);
+ # if we failed because of an exception, assume that
+ # the MARC columns in biblioitems were not set.
+ ModBiblioMarc( $record, $biblionumber, '' );
+ next RECORD;
+ } else {
+ printlog({id=>$id||$originalid||$biblionumber, op=>"insert",status=>"ok"}) if ($logfile);
+ }
+ push @$errors_ref, @{ $more_errors };
+ }
+ if ($#{ $errors_ref } > -1) {
report_item_errors($biblionumber, $errors_ref);
}
}
my $errors_ref = shift;
foreach my $error (@{ $errors_ref }) {
+ next if !$error;
my $msg = "Item not added (bib $biblionumber, item tag #$error->{'item_sequence'}, barcode $error->{'item_barcode'}): ";
my $error_code = $error->{'error_code'};
$error_code =~ s/_/ /g;
my $logelements=shift;
print $loghandle join (";",@$logelements{qw<id op status>}),"\n";
}
+
+
+=head1 NAME
+
+bulkmarcimport.pl - Import bibliographic/authority records into Koha
+
+=head1 USAGE
+
+ $ export KOHA_CONF=/etc/koha.conf
+ $ perl misc/migration_tools/bulkmarcimport.pl -d -commit 1000 \\
+ -file /home/jmf/koha.mrc -n 3000
+
+=head1 WARNING
+
+Don't use this script before you've entered and checked your MARC parameters
+tables twice (or more!). Otherwise, the import won't work correctly and you
+will get invalid data.
+
+=head1 DESCRIPTION
+
+=over
+
+=item B<-h>
+
+This version/help screen
+
+=item B<-b, -biblios>
+
+Type of import: bibliographic records
+
+=item B<-a, -authorities>
+
+Type of import: authority records
+
+=item B<-file>=I<FILE>
+
+The I<FILE> to import
+
+=item B<-v>
+
+Verbose mode. 1 means "some information", 2 means "MARC dumping"
+
+=item B<-fk>
+
+Turn off foreign key checks during import.
+
+=item B<-n>=I<NUMBER>
+
+The I<NUMBER> of records to import. If missing, all the file is imported
+
+=item B<-o, -offset>=I<NUMBER>
+
+File offset before importing, ie I<NUMBER> of records to skip.
+
+=item B<-commit>=I<NUMBER>
+
+The I<NUMBER> of records to wait before performing a 'commit' operation
+
+=item B<-l>
+
+File logs actions done for each record and their status into file
+
+=item B<-t>
+
+Test mode: parses the file, saying what it would do, but doing nothing.
+
+=item B<-s>
+
+Skip automatic conversion of MARC-8 to UTF-8. This option is provided for
+debugging.
+
+=item B<-c>=I<CHARACTERISTIC>
+
+The I<CHARACTERISTIC> MARC flavour. At the moment, only I<MARC21> and
+I<UNIMARC> are supported. MARC21 by default.
+
+=item B<-d>
+
+Delete EVERYTHING related to biblio in koha-DB before import. Tables: biblio,
+biblioitems, items
+
+=item B<-m>=I<FORMAT>
+
+Input file I<FORMAT>: I<MARCXML> or I<ISO2709> (defaults to ISO2709)
+
+=item B<-k, -keepids>=<FIELD>
+
+Store record ids in I<FIELD> (useful for authorities, where 001 contains the
+authid for Koha, which can contain very valuable information for authorities
+coming from LOC or BNF; probably useless for biblios)
+
+=item B<-match>=<FIELD>
+
+I<FIELD> is the matchpoint to use for deduplication, given as
+matchindex,fieldtomatch. fieldtomatch can be either 001 to 999, or a field and
+list of subfields, such as 100abcde
+
+=item B<-i,-isbn>
+
+If set, a search will be done on isbn, and, if the same isbn is found, the
+biblio is not added. It's another method to deduplicate. B<-match> & B<-isbn>
+can be both set.
+
+=item B<-cleanisbn>
+
+Clean ISBN fields in incoming biblio records, i.e. remove hyphens. By default,
+ISBNs are cleaned. B<-nocleanisbn> will keep ISBNs unchanged.
+
+=item B<-x>=I<TAG>
+
+Source bib I<TAG> for reporting the source bib number
+
+=item B<-y>=I<SUBFIELD>
+
+Source I<SUBFIELD> for reporting the source bib number
+
+=item B<-idmap>=I<FILE>
+
+I<FILE> for the koha bib and source id
+
+=item B<-keepids>
+
+Store ids in 009 (useful for authorities, where 001 contains the authid for
+Koha, which can contain very valuable information for authorities coming from
+LOC or BNF; probably useless for biblios)
+
+=item B<-dedupbarcode>
+
+If set, whenever a duplicate barcode is detected, it is removed and the attempt
+to add the item is retried, thereby giving the item a blank barcode. This is
+useful when something has set barcodes to be a biblio ID, or similar (usually
+other software.)
+
+=back
+
+=cut
+