@@ -14,6 +14,17 @@ const CID = require('cids')
 const loadFixture = require('aegir/fixtures')
 const doUntil = require('async/doUntil')
 const waterfall = require('async/waterfall')
+const parallel = require('async/parallel')
+const series = require('async/series')
+const fs = require('fs')
+const path = require('path')
+const push = require('pull-pushable')
+const toPull = require('stream-to-pull-stream')
+const toStream = require('pull-stream-to-stream')
+const {
+  DAGNode,
+  DAGLink
+} = require('ipld-dag-pb')
 
 const unixFSEngine = require('./../src')
 const exporter = unixFSEngine.exporter
@@ -64,6 +75,52 @@ module.exports = (repo) => {
       })
     }
 
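+    // Test helper: recursively walks `directory` on disk, streams every file
+    // found into the importer (with the given chunking `strategy` and
+    // `maxChunkSize`) and calls back with the collected import results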
+    function addTestDirectory ({ directory, strategy = 'balanced', maxChunkSize }, callback) {
+      const input = push()
+      const dirName = path.basename(directory)
+
+      pull(
+        input,
+        pull.map((file) => {
+          return {
+            path: path.join(dirName, path.basename(file)),
+            content: toPull.source(fs.createReadStream(file))
+          }
+        }),
+        importer(ipld, {
+          strategy,
+          maxChunkSize
+        }),
+        pull.collect(callback)
+      )
+
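+      // depth-first walk: directories are listed and recursed into in series,
+      // plain files are pushed onto the pushable `input` stream feeding the
+      // importer above; `input` is ended once the walk finishes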
+      const listFiles = (directory, depth, stream, cb) => {
+        waterfall([
+          (done) => fs.stat(directory, done),
+          (stats, done) => {
+            if (stats.isDirectory()) {
+              return waterfall([
+                (done) => fs.readdir(directory, done),
+                (children, done) => {
+                  series(
+                    children.map(child => (next) => listFiles(path.join(directory, child), depth + 1, stream, next)),
+                    done
+                  )
+                }
+              ], done)
+            }
+
+            stream.push(directory)
+            done()
+          }
+        ], cb)
+      }
+
+      listFiles(directory, 0, input, () => {
+        input.end()
+      })
+    }
+
123+
67124 function checkBytesThatSpanBlocks ( strategy , cb ) {
68125 const bytesInABlock = 262144
69126 const bytes = Buffer . alloc ( bytesInABlock + 100 , 0 )
@@ -517,6 +574,55 @@ module.exports = (repo) => {
       checkBytesThatSpanBlocks('trickle', done)
     })
 
+    it('exports a directory containing an empty file whose content gets turned into a ReadableStream', function (done) {
+      // replicates the behaviour of ipfs.files.get
+      waterfall([
+        (cb) => addTestDirectory({
+          directory: path.join(__dirname, 'fixtures', 'dir-with-empty-files')
+        }, cb),
+        (result, cb) => {
+          const dir = result.pop()
+
+          pull(
+            exporter(dir.multihash, ipld),
+            pull.map((file) => {
+              if (file.content) {
+                file.content = toStream.source(file.content)
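+                // pause the Node stream so no bytes are emitted before a
+                // consumer attaches in the assertions below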
+                file.content.pause()
+              }
+
+              return file
+            }),
+            pull.collect((error, files) => {
+              if (error) {
+                return cb(error)
+              }
+
+              series(
+                files
+                  .filter(file => Boolean(file.content))
+                  .map(file => {
+                    return (done) => {
+                      if (file.content) {
+                        file.content
+                          .pipe(toStream.sink(pull.collect((error, bufs) => {
+                            expect(error).to.not.exist()
+                            expect(bufs.length).to.equal(1)
+                            expect(bufs[0].length).to.equal(0)
+
+                            done()
+                          })))
+                      }
+                    }
+                  }),
+                cb
+              )
+            })
+          )
+        }
+      ], done)
+    })
+
     // TODO: This needs the stores to have timeouts,
     // otherwise it is impossible to predict whether
     // a file really doesn't exist
@@ -532,6 +638,100 @@ module.exports = (repo) => {
         })
       )
     })
+
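+    // Note: a UnixFS 'file' node can carry data of its own in addition to
+    // links to child nodes; the exporter is expected to emit the node's own
+    // data first, then each child's content in link order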
+    it('exports file with data on internal and leaf nodes', function (done) {
+      waterfall([
+        (cb) => createAndPersistNode(ipld, 'raw', [0x04, 0x05, 0x06, 0x07], [], cb),
+        (leaf, cb) => createAndPersistNode(ipld, 'file', [0x00, 0x01, 0x02, 0x03], [
+          leaf
+        ], cb),
+        (file, cb) => {
+          pull(
+            exporter(file.multihash, ipld),
+            pull.asyncMap((file, cb) => readFile(file, cb)),
+            pull.through(buffer => {
+              expect(buffer).to.deep.equal(Buffer.from([0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07]))
+            }),
+            pull.collect(cb)
+          )
+        }
+      ], done)
+    })
+
+    it('exports file with data on some internal and leaf nodes', function (done) {
+      // create a file node with three children, where:
+      //   i = internal node without data
+      //   d = internal node with data
+      //   l = leaf node with data
+      //
+      //      i
+      //    / | \
+      //   l  d  i
+      //      |   \
+      //      l    l
+      waterfall([
+        (cb) => {
+          // create leaves
+          parallel([
+            (next) => createAndPersistNode(ipld, 'raw', [0x00, 0x01, 0x02, 0x03], [], next),
+            (next) => createAndPersistNode(ipld, 'raw', [0x08, 0x09, 0x10, 0x11], [], next),
+            (next) => createAndPersistNode(ipld, 'raw', [0x12, 0x13, 0x14, 0x15], [], next)
+          ], cb)
+        },
+        (leaves, cb) => {
+          parallel([
+            (next) => createAndPersistNode(ipld, 'raw', [0x04, 0x05, 0x06, 0x07], [leaves[1]], next),
+            (next) => createAndPersistNode(ipld, 'raw', null, [leaves[2]], next)
+          ], (error, internalNodes) => {
+            if (error) {
+              return cb(error)
+            }
+
+            createAndPersistNode(ipld, 'file', null, [
+              leaves[0],
+              internalNodes[0],
+              internalNodes[1]
+            ], cb)
+          })
+        },
+        (file, cb) => {
+          pull(
+            exporter(file.multihash, ipld),
+            pull.asyncMap((file, cb) => readFile(file, cb)),
+            pull.through(buffer => {
+              expect(buffer).to.deep.equal(
+                Buffer.from([
+                  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                  0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15
+                ])
+              )
+            }),
+            pull.collect(cb)
+          )
+        }
+      ], done)
+    })
+
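+    // Note: with offset 4 the root node's own four bytes of data are skipped
+    // entirely, so only the leaf node's data should come back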
+    it('exports file with data on internal and leaf nodes with an offset that only fetches data from leaf nodes', function (done) {
+      waterfall([
+        (cb) => createAndPersistNode(ipld, 'raw', [0x04, 0x05, 0x06, 0x07], [], cb),
+        (leaf, cb) => createAndPersistNode(ipld, 'file', [0x00, 0x01, 0x02, 0x03], [
+          leaf
+        ], cb),
+        (file, cb) => {
+          pull(
+            exporter(file.multihash, ipld, {
+              offset: 4
+            }),
+            pull.asyncMap((file, cb) => readFile(file, cb)),
+            pull.through(buffer => {
+              expect(buffer).to.deep.equal(Buffer.from([0x04, 0x05, 0x06, 0x07]))
+            }),
+            pull.collect(cb)
+          )
+        }
+      ], done)
+    })
   })
 }
 
@@ -567,3 +767,26 @@ function readFile (file, done) {
     })
   )
 }
+
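+// Test helper: wraps `data` (if any) in a UnixFS node of the given `type`,
+// records each child's UnixFS fileSize as a block size, links the children
+// via empty-named DAGLinks, then serialises the DAGNode and puts it into
+// IPLD keyed by its multihash CID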
+function createAndPersistNode (ipld, type, data, children, callback) {
+  const file = new UnixFS(type, data ? Buffer.from(data) : undefined)
+  const links = []
+
+  children.forEach(child => {
+    const leaf = UnixFS.unmarshal(child.data)
+
+    file.addBlockSize(leaf.fileSize())
+
+    links.push(new DAGLink('', child.size, child.multihash))
+  })
+
+  DAGNode.create(file.marshal(), links, (error, node) => {
+    if (error) {
+      return callback(error)
+    }
+
+    ipld.put(node, {
+      cid: new CID(node.multihash)
+    }, (error) => callback(error, node))
+  })
+}