Difference between revisions of "User:Francis Tyers/TLH"

From Apertium
Jump to navigation Jump to search
Line 50: Line 50:
   
 
==Tarea 3==
 
==Tarea 3==
  +
  +
$ for i in `seq 1 10`; do
  +
t3 -l $i train.ngrams train.lex < LexEsp-0.raw > LexEsp-0.l$i.t3;
  +
evaluate.pl LexEsp-0.cooked LexEsp-0.l$i.t3 >> output.l;
  +
done
  +
  +
$ cat output.l
  +
418 sentences
  +
LexEsp-0.l1.t3 9411 456 95.379%
  +
418 sentences
  +
LexEsp-0.l2.t3 9466 401 95.936%
  +
418 sentences
  +
LexEsp-0.l3.t3 9492 375 96.199%
  +
418 sentences
  +
LexEsp-0.l4.t3 9490 377 96.179%
  +
418 sentences
  +
LexEsp-0.l5.t3 9473 394 96.007%
  +
418 sentences
  +
LexEsp-0.l6.t3 9477 390 96.047%
  +
418 sentences
  +
LexEsp-0.l7.t3 9473 394 96.007%
  +
418 sentences
  +
LexEsp-0.l8.t3 9470 397 95.976%
  +
418 sentences
  +
LexEsp-0.l9.t3 9470 397 95.976%
  +
418 sentences
  +
LexEsp-0.l10.t3 9470 397 95.976%
  +
</pre>

Revision as of 13:44, 23 December 2007

Tarea 1

$ evaluate.pl LexEsp-0.cooked LexEsp-0.t3 
418 sentences
         LexEsp-0.t3     9470      397  95.976%

Tarea 2

$ for i in `seq 1 9`; do 
    cat LexEsp-[1-$i].cooked > LexExp-ejecucion$i.cooked; 
    cooked2lex.pl < LexExp-ejecucion$i.cooked > train.$i.lex; 
    cooked2ngram.pl < LexExp-ejecucion$i.cooked > train.$i.ngrams; 
    t3 train.$i.ngrams train.$i.lex < LexEsp-0.raw > LexEsp-0.$i.t3; 
    evaluate.pl LexEsp-0.cooked LexEsp-0.$i.t3 >> output ; 
done

$ wc -l LexExp-ejecucion*.cooked
    418 LexExp-ejecucion1.cooked
    836 LexExp-ejecucion2.cooked
   1254 LexExp-ejecucion3.cooked
   1672 LexExp-ejecucion4.cooked
   2090 LexExp-ejecucion5.cooked
   2508 LexExp-ejecucion6.cooked
   2926 LexExp-ejecucion7.cooked
   3344 LexExp-ejecucion8.cooked
   3761 LexExp-ejecucion9.cooked

$ cat output
418 sentences
       LexEsp-0.1.t3     8948      919  90.686%
418 sentences
       LexEsp-0.2.t3     9155      712  92.784%
418 sentences
       LexEsp-0.3.t3     9275      592  94.000%
418 sentences
       LexEsp-0.4.t3     9313      554  94.385%
418 sentences
       LexEsp-0.5.t3     9366      501  94.922%
418 sentences
       LexEsp-0.6.t3     9391      476  95.176%
418 sentences
       LexEsp-0.7.t3     9419      448  95.460%
418 sentences
       LexEsp-0.8.t3     9444      423  95.713%
418 sentences
       LexEsp-0.9.t3     9470      397  95.976%

Tarea 3

$ for i in `seq 1 10`; do

   t3 -l $i train.ngrams train.lex < LexEsp-0.raw > LexEsp-0.l$i.t3; 
   evaluate.pl LexEsp-0.cooked LexEsp-0.l$i.t3 >> output.l; 

done

$ cat output.l
418 sentences

     LexEsp-0.l1.t3     9411      456  95.379%

418 sentences

     LexEsp-0.l2.t3     9466      401  95.936%

418 sentences

     LexEsp-0.l3.t3     9492      375  96.199%

418 sentences

     LexEsp-0.l4.t3     9490      377  96.179%

418 sentences

     LexEsp-0.l5.t3     9473      394  96.007%

418 sentences

     LexEsp-0.l6.t3     9477      390  96.047%

418 sentences

     LexEsp-0.l7.t3     9473      394  96.007%

418 sentences

     LexEsp-0.l8.t3     9470      397  95.976%

418 sentences

     LexEsp-0.l9.t3     9470      397  95.976%

418 sentences

    LexEsp-0.l10.t3     9470      397  95.976%