Fix race in LFS ContentStore.Put(...) (#14895)

Continuing on from #14888

The previous implementation has a race whereby an incomplete upload or a
hash-mismatched upload can end up in the ContentStore. This PR moves the
validation into the reader, so that on a hash or size mismatch the reader
returns an error instead of io.EOF, causing the storage layer to abort
the save.

Signed-off-by: Andrew Thornton <art27@cantab.net>
release/v1.15
zeripath 2021-03-06 20:21:56 +00:00 committed by GitHub
parent 5ccf8b6430
commit a68db9076a
1 changed file with 51 additions and 11 deletions


@@ -9,6 +9,7 @@ import (
     "encoding/hex"
     "errors"
     "fmt"
+    "hash"
     "io"
     "os"
@@ -66,15 +67,20 @@ func (s *ContentStore) Get(meta *models.LFSMetaObject, fromByte int64) (io.ReadC
 // Put takes a Meta object and an io.Reader and writes the content to the store.
 func (s *ContentStore) Put(meta *models.LFSMetaObject, r io.Reader) error {
-    hash := sha256.New()
-    rd := io.TeeReader(r, hash)
     p := meta.RelativePath()
-    written, err := s.Save(p, rd)
+
+    // Wrap the provided reader with an inline hashing and size checker
+    wrappedRd := newHashingReader(meta.Size, meta.Oid, r)
+
+    // now pass the wrapped reader to Save - if there is a size mismatch or hash mismatch then
+    // the errors returned by the newHashingReader should percolate up to here
+    written, err := s.Save(p, wrappedRd)
     if err != nil {
         log.Error("Whilst putting LFS OID[%s]: Failed to copy to tmpPath: %s Error: %v", meta.Oid, p, err)
         return err
     }
+
+    // This shouldn't happen but it is sensible to test
     if written != meta.Size {
         if err := s.Delete(p); err != nil {
             log.Error("Cleaning the LFS OID[%s] failed: %v", meta.Oid, err)
@@ -82,14 +88,6 @@ func (s *ContentStore) Put(meta *models.LFSMetaObject, r io.Reader) error {
         return errSizeMismatch
     }
-    shaStr := hex.EncodeToString(hash.Sum(nil))
-    if shaStr != meta.Oid {
-        if err := s.Delete(p); err != nil {
-            log.Error("Cleaning the LFS OID[%s] failed: %v", meta.Oid, err)
-        }
-        return errHashMismatch
-    }
     return nil
 }
@@ -118,3 +116,45 @@ func (s *ContentStore) Verify(meta *models.LFSMetaObject) (bool, error) {
     return true, nil
 }
+
+type hashingReader struct {
+    internal     io.Reader
+    currentSize  int64
+    expectedSize int64
+    hash         hash.Hash
+    expectedHash string
+}
+
+func (r *hashingReader) Read(b []byte) (int, error) {
+    n, err := r.internal.Read(b)
+
+    if n > 0 {
+        r.currentSize += int64(n)
+
+        wn, werr := r.hash.Write(b[:n])
+        if wn != n || werr != nil {
+            return n, werr
+        }
+    }
+
+    if err != nil && err == io.EOF {
+        if r.currentSize != r.expectedSize {
+            return n, errSizeMismatch
+        }
+
+        shaStr := hex.EncodeToString(r.hash.Sum(nil))
+        if shaStr != r.expectedHash {
+            return n, errHashMismatch
+        }
+    }
+
+    return n, err
+}
+
+func newHashingReader(expectedSize int64, expectedHash string, reader io.Reader) *hashingReader {
+    return &hashingReader{
+        internal:     reader,
+        expectedSize: expectedSize,
+        expectedHash: expectedHash,
+        hash:         sha256.New(),
+    }
+}
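For context, here is a minimal standalone sketch (not part of the diff above) of how the hashing-reader pattern behaves from the consumer's side: because the wrapper returns a size or hash error instead of io.EOF, an io.Copy-style consumer such as Save fails instead of treating the bad content as fully stored. The package layout, error variables, and sample data below are illustrative assumptions for this sketch only, not Gitea's actual code.

// hashing_reader_sketch.go - illustrative only; names and error values here
// are assumptions for the example, not part of the Gitea LFS package.
package main

import (
    "bytes"
    "crypto/sha256"
    "encoding/hex"
    "errors"
    "fmt"
    "hash"
    "io"
)

var (
    errSizeMismatch = errors.New("content size does not match")
    errHashMismatch = errors.New("content hash does not match OID")
)

// hashingReader mirrors the pattern in the diff: it hashes and counts bytes
// as they stream through, and converts the final io.EOF into an error when
// the totals do not match what was declared.
type hashingReader struct {
    internal     io.Reader
    currentSize  int64
    expectedSize int64
    hash         hash.Hash
    expectedHash string
}

func (r *hashingReader) Read(b []byte) (int, error) {
    n, err := r.internal.Read(b)
    if n > 0 {
        r.currentSize += int64(n)
        r.hash.Write(b[:n]) // hash.Hash writes never return an error
    }
    if err == io.EOF {
        if r.currentSize != r.expectedSize {
            return n, errSizeMismatch
        }
        if hex.EncodeToString(r.hash.Sum(nil)) != r.expectedHash {
            return n, errHashMismatch
        }
    }
    return n, err
}

func main() {
    content := []byte("hello LFS")
    sum := sha256.Sum256(content)
    oid := hex.EncodeToString(sum[:])

    // Declared size and OID match: the copy completes normally.
    ok := &hashingReader{internal: bytes.NewReader(content), expectedSize: int64(len(content)), expectedHash: oid, hash: sha256.New()}
    _, err := io.Copy(io.Discard, ok)
    fmt.Println("valid upload:", err) // <nil>

    // Declared size is wrong: io.Copy surfaces errSizeMismatch instead of
    // finishing on io.EOF, so the caller never records the object as stored.
    bad := &hashingReader{internal: bytes.NewReader(content), expectedSize: 3, expectedHash: oid, hash: sha256.New()}
    _, err = io.Copy(io.Discard, bad)
    fmt.Println("bad upload:", err) // content size does not match
}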