@@ -144,7 +144,29 @@ func importImageStream(ctx context.Context,
144144 client * containerdapi.Client ,
145145 srv machine.MachineService_DebugContainerServer ,
146146) (containerdapi.Image , error ) {
147- images , err := client .Import (ctx , & imageChunkReaderWrapper {srv : srv })
147+ r , w := io .Pipe ()
148+
149+ go func () {
150+ for {
151+ msg , err := srv .Recv ()
152+ if err != nil {
153+ log .Printf ("import image: receive error: %s" , err .Error ())
154+
155+ return
156+ }
157+
158+ chunk := msg .GetImageChunk ()
159+ if chunk == nil || len (chunk .GetBytes ()) == 0 {
160+ return
161+ }
162+
163+ if _ , err := w .Write (chunk .GetBytes ()); err != nil {
164+ log .Printf ("import image: write error: %s" , err .Error ())
165+ }
166+ }
167+ }()
168+
169+ images , err := client .Import (ctx , r )
148170 if err != nil {
149171 return nil , fmt .Errorf ("failed to import image: %v" , err )
150172 }
@@ -318,70 +340,6 @@ func runAndAttachContainer(
318340 return nil
319341}
320342
321- // TODO: I think it makes more sense to have the API be
322- // split into two separate calls - one for creating the
323- // debug container (with the spec, still a streaming RPC
324- // that stays open while the container is running), and
325- // another one for bidirectional stdin/out streaming.
326-
// imageChunkReaderWrapper is a plain io.Reader wrapper
// around the gRPC stream that we can pass to containerd.
//
// Since we don't control the size of the incoming chunks
// (or the `Read()`s), we hold onto the last received chunk
// in case len(chunk) > read size.
//
// TODO(laurazard): this could:
//   - be faster, if we just loaded the entire image into
//     memory instead of reading from the client piecemeal
//   - be simpler, by just passing a buffer into containerd
//     and `io.Copy`ing into it.
type imageChunkReaderWrapper struct {
	// srv is the server side of the debug-container stream;
	// image chunks are pulled from it one Recv at a time.
	srv machine.MachineService_DebugContainerServer

	// currentChunk buffers the most recently received image chunk —
	// an incoming chunk may be larger than a single Read, and the
	// unread remainder must not be lost between reads.
	currentChunk []byte
	// offset is the read position in currentChunk; bytes before it
	// have already been handed out by Read.
	offset int
}
349-
350- func (i * imageChunkReaderWrapper ) Read (b []byte ) (int , error ) {
351- return i .readImageChunk (b )
352- }
353-
354- func (i * imageChunkReaderWrapper ) readImageChunk (b []byte ) (int , error ) {
355- if len (i .currentChunk )- i .offset == 0 {
356- err := i .loadNewChunk ()
357- if err != nil {
358- return 0 , err
359- }
360- }
361-
362- n := copy (b , i .currentChunk [i .offset :])
363- i .offset += n
364-
365- return n , nil
366- }
367-
368- func (i * imageChunkReaderWrapper ) loadNewChunk () error {
369- msg , err := i .srv .Recv ()
370- if err != nil {
371- return err
372- }
373-
374- chunk := msg .GetImageChunk ()
375- if chunk == nil || len (chunk .GetBytes ()) == 0 {
376- return io .EOF
377- }
378-
379- i .currentChunk = chunk .GetBytes ()
380- i .offset = 0
381-
382- return nil
383- }
384-
385343func generateContainerID () (string , error ) {
386344 b := make ([]byte , 8 )
387345 if _ , err := rand .Read (b ); err != nil {
0 commit comments