Java Examples for com.amazonaws.services.s3.model.S3ObjectInputStream
The following Java examples demonstrate how to use com.amazonaws.services.s3.model.S3ObjectInputStream. These source code samples are taken from different open source projects.
Example 1
| Project: aws-sdk-android-master File: AmazonS3Client.java View source code |
/*
 * (non-Javadoc)
 * @see
 * com.amazonaws.services.s3.AmazonS3#getObject(com.amazonaws.services.s3
 * .model.GetObjectRequest)
 */
@Override
public S3Object getObject(GetObjectRequest getObjectRequest) throws AmazonClientException, AmazonServiceException {
    // Fail fast on missing required request fields.
    assertParameterNotNull(getObjectRequest, "The GetObjectRequest parameter must be specified when requesting an object");
    assertParameterNotNull(getObjectRequest.getBucketName(), "The bucket name parameter must be specified when requesting an object");
    assertParameterNotNull(getObjectRequest.getKey(), "The key parameter must be specified when requesting an object");
    Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(), getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET);
    if (getObjectRequest.getVersionId() != null) {
        request.addParameter("versionId", getObjectRequest.getVersionId());
    }
    // Range: the start byte is always sent; the end byte only when it is valid.
    long[] range = getObjectRequest.getRange();
    if (range != null) {
        String rangeHeader = "bytes=" + Long.toString(range[0]) + "-";
        if (range[1] >= 0) {
            /*
             * A negative end value is invalid per the S3 range GET contract and
             * would result in downloading the entire object. Leaving the last
             * byte empty in that case resumes the download from range[0] to the
             * end of the object.
             */
            rangeHeader += Long.toString(range[1]);
        }
        request.addHeader(Headers.RANGE, rangeHeader);
    }
    if (getObjectRequest.isRequesterPays()) {
        request.addHeader(Headers.REQUESTER_PAYS_HEADER, Constants.REQUESTER_PAYS);
    }
    // Optional response-header overrides and conditional-GET constraints.
    addResponseHeaderParameters(request, getObjectRequest.getResponseHeaders());
    addDateHeader(request, Headers.GET_OBJECT_IF_MODIFIED_SINCE, getObjectRequest.getModifiedSinceConstraint());
    addDateHeader(request, Headers.GET_OBJECT_IF_UNMODIFIED_SINCE, getObjectRequest.getUnmodifiedSinceConstraint());
    addStringListHeader(request, Headers.GET_OBJECT_IF_MATCH, getObjectRequest.getMatchingETagConstraints());
    addStringListHeader(request, Headers.GET_OBJECT_IF_NONE_MATCH, getObjectRequest.getNonmatchingETagConstraints());
    // Populate the SSE-CPK (customer-provided key) parameters to the request header.
    populateSseCpkRequestParameters(request, getObjectRequest.getSSECustomerKey());
    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    ProgressListener progressListener = getObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor.wrapListener(progressListener);
    try {
        S3Object s3Object = invoke(request, new S3ObjectResponseHandler(), getObjectRequest.getBucketName(), getObjectRequest.getKey());
        /*
         * TODO: For now, it's easiest to set these here in the client, but
         * we could push this back into the response handler with a little
         * more work.
         */
        s3Object.setBucketName(getObjectRequest.getBucketName());
        s3Object.setKey(getObjectRequest.getKey());
        InputStream input = s3Object.getObjectContent();
        // Hold a reference to this client while the InputStream is still
        // around - otherwise a finalizer in the HttpClient may reset the
        // underlying TCP connection out from under us.
        input = new ServiceClientHolderInputStream(input, this);
        // Wrap the stream in a filter that will trigger progress reports.
        if (progressListenerCallbackExecutor != null) {
            @SuppressWarnings("resource") ProgressReportingInputStream progressReportingInputStream = new ProgressReportingInputStream(input, progressListenerCallbackExecutor);
            progressReportingInputStream.setFireCompletedEvent(true);
            input = progressReportingInputStream;
            fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE);
        }
        // Unless MD5 validation is skipped, wrap the stream in a validator that
        // digests the bytes and complains if what we received doesn't match the ETag.
        if (!ServiceUtils.skipMd5CheckPerRequest(getObjectRequest) && !ServiceUtils.skipMd5CheckPerResponse(s3Object.getObjectMetadata())) {
            byte[] serverSideHash = null;
            String etag = s3Object.getObjectMetadata().getETag();
            // A multipart-upload ETag is not an MD5 of the content, so it cannot be validated.
            if (etag != null && ServiceUtils.isMultipartUploadETag(etag) == false) {
                serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag());
                try {
                    // No content length check is performed when the
                    // MD5 check is enabled, since a correct MD5 check would
                    // imply a correct content length.
                    MessageDigest digest = MessageDigest.getInstance("MD5");
                    input = new DigestValidationInputStream(input, digest, serverSideHash);
                } catch (NoSuchAlgorithmException e) {
                    log.warn("No MD5 digest algorithm available. Unable to calculate " + "checksum and verify data integrity.", e);
                }
            }
        } else {
            // Ensures the data received from S3 has the same length as the
            // expected content-length
            input = new LengthCheckInputStream(input, // expected
            s3Object.getObjectMetadata().getContentLength(), // length
            INCLUDE_SKIPPED_BYTES);
            // INCLUDE_SKIPPED_BYTES: bytes received from S3 are
            // all counted even if skipped
        }
        // Re-wrap within an S3ObjectInputStream. Explicitly do not collect
        // metrics here because we know we're ultimately wrapping another
        // S3ObjectInputStream which will take care of that.
        s3Object.setObjectContent(new S3ObjectInputStream(input));
        return s3Object;
    } catch (AmazonS3Exception ase) {
        // 412 (Precondition Failed) / 304 (Not Modified): a conditional-GET
        // constraint was not met; report a canceled transfer and return null.
        if (ase.getStatusCode() == 412 || ase.getStatusCode() == 304) {
            fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.CANCELED_EVENT_CODE);
            return null;
        }
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
        throw ase;
    }
}Example 2
| Project: aws-java-sdk-master File: AmazonS3Client.java View source code |
@Override
public S3Object getObject(GetObjectRequest getObjectRequest) throws SdkClientException, AmazonServiceException {
    // Apply registered request handlers/customizations before execution.
    getObjectRequest = beforeClientExecution(getObjectRequest);
    assertNotNull(getObjectRequest, "GetObjectRequest");
    assertStringNotEmpty(getObjectRequest.getBucketName(), "BucketName");
    assertStringNotEmpty(getObjectRequest.getKey(), "Key");
    Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(), getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET);
    if (getObjectRequest.getVersionId() != null) {
        request.addParameter("versionId", getObjectRequest.getVersionId());
    }
    addPartNumberIfNotNull(request, getObjectRequest.getPartNumber());
    // Range: both the start and end byte are sent exactly as given.
    long[] range = getObjectRequest.getRange();
    if (range != null) {
        request.addHeader(Headers.RANGE, "bytes=" + Long.toString(range[0]) + "-" + Long.toString(range[1]));
    }
    populateRequesterPaysHeader(request, getObjectRequest.isRequesterPays());
    // Optional response-header overrides and conditional-GET constraints.
    addResponseHeaderParameters(request, getObjectRequest.getResponseHeaders());
    addDateHeader(request, Headers.GET_OBJECT_IF_MODIFIED_SINCE, getObjectRequest.getModifiedSinceConstraint());
    addDateHeader(request, Headers.GET_OBJECT_IF_UNMODIFIED_SINCE, getObjectRequest.getUnmodifiedSinceConstraint());
    addStringListHeader(request, Headers.GET_OBJECT_IF_MATCH, getObjectRequest.getMatchingETagConstraints());
    addStringListHeader(request, Headers.GET_OBJECT_IF_NONE_MATCH, getObjectRequest.getNonmatchingETagConstraints());
    // Populate the SSE-C parameters to the request header
    populateSSE_C(request, getObjectRequest.getSSECustomerKey());
    final ProgressListener listener = getObjectRequest.getGeneralProgressListener();
    publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
    try {
        S3Object s3Object = invoke(request, new S3ObjectResponseHandler(), getObjectRequest.getBucketName(), getObjectRequest.getKey());
        /*
         * TODO: For now, it's easiest to set these here in the client, but
         * we could push this back into the response handler with a
         * little more work.
         */
        s3Object.setBucketName(getObjectRequest.getBucketName());
        s3Object.setKey(getObjectRequest.getKey());
        InputStream is = s3Object.getObjectContent();
        HttpRequestBase httpRequest = s3Object.getObjectContent().getHttpRequest();
        // Hold a reference to this client while the InputStream is still
        // around - otherwise a finalizer in the HttpClient may reset the
        // underlying TCP connection out from under us.
        is = new ServiceClientHolderInputStream(is, this);
        // Used to trigger a transfer-completed event when the stream is entirely consumed.
        ProgressInputStream progressInputStream = new ProgressInputStream(is, listener) {
            @Override
            protected void onEOF() {
                publishProgress(getListener(), ProgressEventType.TRANSFER_COMPLETED_EVENT);
            }
        };
        is = progressInputStream;
        // Unless client-side validation is skipped, wrap the stream in a validator
        // that digests the bytes and complains if what we received doesn't match the ETag.
        if (!skipMd5CheckStrategy.skipClientSideValidation(getObjectRequest, s3Object.getObjectMetadata())) {
            byte[] serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag());
            try {
                // No content length check is performed when the
                // MD5 check is enabled, since a correct MD5 check would
                // imply a correct content length.
                MessageDigest digest = MessageDigest.getInstance("MD5");
                is = new DigestValidationInputStream(is, digest, serverSideHash);
            } catch (NoSuchAlgorithmException e) {
                log.warn("No MD5 digest algorithm available. Unable to calculate " + "checksum and verify data integrity.", e);
            }
        } else {
            // Ensures the data received from S3 has the same length as the
            // expected content-length
            is = new LengthCheckInputStream(is, // expected length
            s3Object.getObjectMetadata().getContentLength(), // bytes received from S3 are all included even if skipped
            INCLUDE_SKIPPED_BYTES);
        }
        s3Object.setObjectContent(new S3ObjectInputStream(is, httpRequest, false));
        return s3Object;
    } catch (AmazonS3Exception ase) {
        // 412 (Precondition Failed) / 304 (Not Modified): a conditional-GET
        // constraint was not met; report a canceled transfer and return null.
        if (ase.getStatusCode() == 412 || ase.getStatusCode() == 304) {
            publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
            return null;
        }
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw ase;
    }
}Example 3
| Project: aws-sdk-java-master File: AmazonS3Client.java View source code |
@Override
public S3Object getObject(GetObjectRequest getObjectRequest) throws SdkClientException, AmazonServiceException {
    // Apply registered request handlers/customizations before execution.
    getObjectRequest = beforeClientExecution(getObjectRequest);
    assertNotNull(getObjectRequest, "GetObjectRequest");
    assertStringNotEmpty(getObjectRequest.getBucketName(), "BucketName");
    assertStringNotEmpty(getObjectRequest.getKey(), "Key");
    Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(), getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET);
    if (getObjectRequest.getVersionId() != null) {
        request.addParameter("versionId", getObjectRequest.getVersionId());
    }
    addPartNumberIfNotNull(request, getObjectRequest.getPartNumber());
    // Range: both the start and end byte are sent exactly as given.
    long[] range = getObjectRequest.getRange();
    if (range != null) {
        request.addHeader(Headers.RANGE, "bytes=" + Long.toString(range[0]) + "-" + Long.toString(range[1]));
    }
    populateRequesterPaysHeader(request, getObjectRequest.isRequesterPays());
    // Optional response-header overrides and conditional-GET constraints.
    addResponseHeaderParameters(request, getObjectRequest.getResponseHeaders());
    addDateHeader(request, Headers.GET_OBJECT_IF_MODIFIED_SINCE, getObjectRequest.getModifiedSinceConstraint());
    addDateHeader(request, Headers.GET_OBJECT_IF_UNMODIFIED_SINCE, getObjectRequest.getUnmodifiedSinceConstraint());
    addStringListHeader(request, Headers.GET_OBJECT_IF_MATCH, getObjectRequest.getMatchingETagConstraints());
    addStringListHeader(request, Headers.GET_OBJECT_IF_NONE_MATCH, getObjectRequest.getNonmatchingETagConstraints());
    // Populate the SSE-C parameters to the request header
    populateSSE_C(request, getObjectRequest.getSSECustomerKey());
    final ProgressListener listener = getObjectRequest.getGeneralProgressListener();
    publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
    try {
        S3Object s3Object = invoke(request, new S3ObjectResponseHandler(), getObjectRequest.getBucketName(), getObjectRequest.getKey());
        /*
         * TODO: For now, it's easiest to set these here in the client, but
         * we could push this back into the response handler with a
         * little more work.
         */
        s3Object.setBucketName(getObjectRequest.getBucketName());
        s3Object.setKey(getObjectRequest.getKey());
        InputStream is = s3Object.getObjectContent();
        HttpRequestBase httpRequest = s3Object.getObjectContent().getHttpRequest();
        // Hold a reference to this client while the InputStream is still
        // around - otherwise a finalizer in the HttpClient may reset the
        // underlying TCP connection out from under us.
        is = new ServiceClientHolderInputStream(is, this);
        // Used to trigger a transfer-completed event when the stream is entirely consumed.
        ProgressInputStream progressInputStream = new ProgressInputStream(is, listener) {
            @Override
            protected void onEOF() {
                publishProgress(getListener(), ProgressEventType.TRANSFER_COMPLETED_EVENT);
            }
        };
        is = progressInputStream;
        // Unless client-side validation is skipped, wrap the stream in a validator
        // that digests the bytes and complains if what we received doesn't match the ETag.
        if (!skipMd5CheckStrategy.skipClientSideValidation(getObjectRequest, s3Object.getObjectMetadata())) {
            byte[] serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag());
            try {
                // No content length check is performed when the
                // MD5 check is enabled, since a correct MD5 check would
                // imply a correct content length.
                MessageDigest digest = MessageDigest.getInstance("MD5");
                is = new DigestValidationInputStream(is, digest, serverSideHash);
            } catch (NoSuchAlgorithmException e) {
                log.warn("No MD5 digest algorithm available. Unable to calculate " + "checksum and verify data integrity.", e);
            }
        } else {
            // Ensures the data received from S3 has the same length as the
            // expected content-length
            is = new LengthCheckInputStream(is, // expected length
            s3Object.getObjectMetadata().getContentLength(), // bytes received from S3 are all included even if skipped
            INCLUDE_SKIPPED_BYTES);
        }
        s3Object.setObjectContent(new S3ObjectInputStream(is, httpRequest, false));
        return s3Object;
    } catch (AmazonS3Exception ase) {
        // 412 (Precondition Failed) / 304 (Not Modified): a conditional-GET
        // constraint was not met; report a canceled transfer and return null.
        if (ase.getStatusCode() == 412 || ase.getStatusCode() == 304) {
            publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
            return null;
        }
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw ase;
    }
}Example 4
| Project: sirusi-master File: SirusiHandlerTest.java View source code |
@Test
public void index() throws Exception {
    Context context = createContext();
    String body = "aaaaaaaaaaaaaa";
    // Metadata mock reports exactly the body's length.
    ObjectMetadata metadata = mock(ObjectMetadata.class);
    when(metadata.getContentLength()).thenReturn(Long.valueOf(body.length()));
    // The mocked S3 object serves the body through a real S3ObjectInputStream.
    InputStream bodyStream = new ByteArrayInputStream(body.getBytes(StandardCharsets.UTF_8));
    S3Object s3Object = mock(S3Object.class);
    when(s3Object.getObjectMetadata()).thenReturn(metadata);
    when(s3Object.getObjectContent()).thenReturn(new S3ObjectInputStream(bodyStream, null));
    when(s3.getObject(anyString(), anyString())).thenReturn(s3Object);
    input.seed = "index.html";
    Response response = target.handleRequest(input, context);
    assertEquals(body, response.getContent());
}
Example 5
| Project: aws-cloudtrail-processing-library-master File: S3Manager.java View source code |
/**
 * Downloads an AWS CloudTrail log from the specified source.
 *
 * @param ctLog the {@link com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailLog} to download
 * @param source the {@link com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailSource} to download
 * the log from.
 * @return a byte array containing the log data, or {@code null} when the
 *         download fails (the failure is routed to the exception handler).
 */
public byte[] downloadLog(CloudTrailLog ctLog, CloudTrailSource source) {
    boolean success = false;
    ProgressStatus startStatus = new ProgressStatus(ProgressState.downloadLog, new BasicProcessLogInfo(source, ctLog, success));
    final Object downloadSourceReportObject = this.progressReporter.reportStart(startStatus);
    byte[] s3ObjectBytes = null;
    // start to download CloudTrail log
    try {
        S3Object s3Object = this.getObject(ctLog.getS3Bucket(), ctLog.getS3ObjectKey());
        // try-with-resources guarantees the S3 stream is closed after reading.
        try (S3ObjectInputStream s3InputStream = s3Object.getObjectContent()) {
            s3ObjectBytes = LibraryUtils.toByteArray(s3InputStream);
        }
        ctLog.setLogFileSize(s3Object.getObjectMetadata().getContentLength());
        success = true;
        logger.info("Downloaded log file " + ctLog.getS3ObjectKey() + " from " + ctLog.getS3Bucket());
    } catch (AmazonServiceException | IOException e) {
        // Fixed garbled multi-catch: the original "catch (AmazonServiceExceptionIOException | e)"
        // does not compile; the intended form is "AmazonServiceException | IOException e".
        ProcessingLibraryException exception = new ProcessingLibraryException("Fail to download log file.", e, startStatus);
        this.exceptionHandler.handleException(exception);
    } finally {
        // Always report the end state, whether or not the download succeeded.
        ProgressStatus endStatus = new ProgressStatus(ProgressState.downloadLog, new BasicProcessLogInfo(source, ctLog, success));
        this.progressReporter.reportEnd(endStatus, downloadSourceReportObject);
    }
    return s3ObjectBytes;
}
Example 6
| Project: aws-codepipeline-plugin-for-jenkins-master File: DownloadCallableTest.java View source code |
@Before
public void setUp() throws IOException {
    // Initialize @Mock fields and the on-disk test fixture folders.
    MockitoAnnotations.initMocks(this);
    TestUtils.initializeTestingFolders();
    workspace = Paths.get(TestUtils.TEST_DIR).toFile();
    outContent = TestUtils.setOutputStream();
    inputArtifacts = new ArrayList<>();
    inputArtifacts.add(inputArtifact);
    s3ArtifactLocation = new S3ArtifactLocation();
    s3ArtifactLocation.setBucketName(S3_BUCKET_NAME);
    s3ArtifactLocation.setObjectKey(S3_OBJECT_KEY);
    // Back the mocked S3 object with a real zip fixture from the test classpath.
    s3ObjectInputStream = new S3ObjectInputStream(new FileInputStream(getClass().getClassLoader().getResource("aws-codedeploy-demo.zip").getFile()), null, false);
    // Stub the client factory and the AWS clients it hands out.
    when(clientFactory.getAwsClient(anyString(), anyString(), anyString(), anyInt(), anyString(), anyString())).thenReturn(awsClients);
    when(awsClients.getCodePipelineClient()).thenReturn(codePipelineClient);
    when(awsClients.getS3Client(any(AWSCredentialsProvider.class))).thenReturn(s3Client);
    when(s3Client.getObject(anyString(), anyString())).thenReturn(s3Object);
    when(s3Object.getKey()).thenReturn(S3_OBJECT_KEY);
    when(s3Object.getObjectContent()).thenReturn(s3ObjectInputStream);
    // Plugin configuration model stubs.
    when(model.getAwsAccessKey()).thenReturn(ACCESS_KEY);
    when(model.getAwsSecretKey()).thenReturn(SECRET_KEY);
    when(model.getProxyHost()).thenReturn(PROXY_HOST);
    when(model.getProxyPort()).thenReturn(PROXY_PORT);
    when(model.getRegion()).thenReturn(REGION);
    when(model.getCompressionType()).thenReturn(CompressionType.Zip);
    // Job and job-details stubs that wire up the input artifact's S3 location.
    when(job.getId()).thenReturn(JOB_ID);
    when(job.getData()).thenReturn(jobData);
    when(jobData.getInputArtifacts()).thenReturn(inputArtifacts);
    when(codePipelineClient.getJobDetails(any(GetJobDetailsRequest.class))).thenReturn(getJobDetailsResult);
    when(getJobDetailsResult.getJobDetails()).thenReturn(jobDetails);
    when(jobDetails.getData()).thenReturn(getJobDetailsJobData);
    when(getJobDetailsJobData.getArtifactCredentials()).thenReturn(JOB_CREDENTIALS);
    when(inputArtifact.getLocation()).thenReturn(artifactLocation);
    when(artifactLocation.getS3Location()).thenReturn(s3ArtifactLocation);
    // The object under test, built against the mocked collaborators above.
    downloader = new DownloadCallable(CLEAR_WORKSPACE, job, model, clientFactory, PLUGIN_VERSION, null);
}Example 7
| Project: oodt-master File: S3DataTransferer.java View source code |
/**
 * Stages the content of the given S3 object into the target directory, using
 * the file name taken from the reference's data-store path.
 *
 * @param file the S3 object whose content is copied
 * @param ref the reference providing the destination file name
 * @param directory the directory to stage the file into
 * @throws IOException if reading from S3 or writing the local file fails
 */
private void stageFile(S3Object file, Reference ref, File directory) throws IOException {
    File target = new File(directory, new File(stripProtocol(ref.getDataStoreReference(), false)).getName());
    // try-with-resources replaces the manual close-and-ignore finally block:
    // both streams are closed in all cases, and close failures now surface as
    // IOException instead of being silently swallowed.
    try (S3ObjectInputStream inStream = file.getObjectContent();
            FileOutputStream outStream = new FileOutputStream(target)) {
        IOUtils.copy(inStream, outStream);
    }
}
Example 8
| Project: Priam-master File: RangeReadInputStream.java View source code |
/**
 * Fetches the configured byte range of the remote object and copies it into
 * the destination buffer, advancing the running offset by the bytes read.
 *
 * @return the number of bytes copied, or -1 when the stream was empty
 * @throws IOException if reading from the S3 stream fails
 */
public Integer retriableCall() throws IOException {
    GetObjectRequest req = new GetObjectRequest(bucketName, path.getRemotePath());
    req.setRange(firstByte, endByte);
    S3ObjectInputStream stream = null;
    try {
        stream = s3Client.getObject(req).getObjectContent();
        final byte[] chunk = new byte[4092];
        int totalRead = 0;
        int destOffset = off;
        int count;
        // Drain the range stream chunk by chunk into the caller's buffer.
        while ((count = stream.read(chunk, 0, chunk.length)) >= 0) {
            System.arraycopy(chunk, 0, b, destOffset, count);
            totalRead += count;
            destOffset += count;
        }
        // Immediate EOF with nothing read signals an empty range.
        if (totalRead == 0 && count == -1) {
            return -1;
        }
        offset += totalRead;
        return Integer.valueOf(totalRead);
    } finally {
        IOUtils.closeQuietly(stream);
    }
}
Example 9
| Project: Tank-master File: S3FileStorage.java View source code |
/**
 * Reads the stored file's bytes from S3 and returns them as an in-memory
 * stream, optionally wrapped for gzip decompression.
 *
 * @param fileData descriptor of the file to read
 * @return an InputStream over the file's content, or null if no object was found
 */
@Override
public InputStream readFileData(FileData fileData) {
    // Build the normalized, forward-slash S3 key without a leading slash.
    String path = FilenameUtils.separatorsToUnix(FilenameUtils.normalize(extraPath + fileData.getPath() + "/" + fileData.getFileName()));
    path = StringUtils.stripStart(path, "/");
    InputStream result = null;
    S3ObjectInputStream content = null;
    try {
        S3Object object = s3Client.getObject(bucketName, path);
        if (object != null) {
            // Buffer the whole object so the S3 connection can be released promptly.
            content = object.getObjectContent();
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            IOUtils.copy(content, buffer);
            result = new ByteArrayInputStream(buffer.toByteArray());
            if (compress) {
                result = new GZIPInputStream(result);
            }
        }
    } catch (Exception e) {
        LOG.error("Error getting File: " + e, e);
        throw new RuntimeException(e);
    } finally {
        IOUtils.closeQuietly(content);
    }
    return result;
}
Example 10
| Project: elasticsearch-master File: MockAmazonS3.java View source code |
@Override
public S3Object getObject(GetObjectRequest getObjectRequest) throws AmazonClientException, AmazonServiceException {
    // in ESBlobStoreContainerTestCase.java, the prefix is empty,
    // so the key and blobName are equivalent to each other
    final String blobName = getObjectRequest.getKey();
    if (blobs.containsKey(blobName) == false) {
        throw new AmazonS3Exception("[" + blobName + "] does not exist.");
    }
    // The HTTP request attribute is irrelevant for reading, hence null.
    final S3Object result = new S3Object();
    result.setObjectContent(new S3ObjectInputStream(blobs.get(blobName), null, false));
    return result;
}
Example 11
| Project: OpenBaas-master File: AwsModel.java View source code |
// *** DOWNLOAD *** //
/**
 * Downloads a media object (image, audio or video) for an application and
 * returns its content as a byte array; the content is also staged to a local
 * file at the same relative path.
 *
 * Allowed types images, audio, video
 *
 * Allowed structures, media, storage
 *
 * @param appId the application id
 * @param type the media type of the stored object
 * @param id the object id
 * @param extension optional file extension appended to the key
 * @param quality not used by this implementation
 * @param bars not used by this implementation
 * @return the downloaded bytes, or {@code null} when the download failed
 * @throws IOException if closing the local output stream fails
 */
@Override
public byte[] download(String appId, ModelEnum type, String id, String extension, String quality, String bars) throws IOException {
    OutputStream soutputStream = null;
    byte[] byteArray = null;
    try {
        this.startAWS();
        StringBuffer directory = new StringBuffer("apps/" + appId + "/media/" + type.toString() + "/" + id);
        if (extension != null)
            directory.append("." + extension);
        S3Object object = s3.getObject(new GetObjectRequest(Const.getAwsOpenBaasBucket(), directory.toString()));
        S3ObjectInputStream s3ObjInputStream = object.getObjectContent();
        byteArray = IOUtils.toByteArray(s3ObjInputStream);
        // Write the bytes we already downloaded instead of issuing a second
        // GET for the same object, as the original did.
        soutputStream = new FileOutputStream(new File(directory.toString()));
        soutputStream.write(byteArray);
    } catch (Exception e) {
        Log.error("", this, "download", "An error ocorred.", e);
    } finally {
        // Close only when the stream was actually opened: the original closed
        // unconditionally and threw NullPointerException from the finally
        // block whenever the download failed before the stream was created.
        if (soutputStream != null) {
            soutputStream.close();
        }
    }
    return byteArray;
}
Example 12
| Project: spring-integration-aws-master File: S3MessageHandlerTests.java View source code |
@Bean
public AmazonS3 amazonS3() {
    // Fully mocked AmazonS3 client backed by two in-memory S3 objects.
    AmazonS3 amazonS3 = mock(AmazonS3.class);
    given(amazonS3.putObject(any(PutObjectRequest.class))).willReturn(new PutObjectResult());
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setLastModified(new Date());
    given(amazonS3.getObjectMetadata(any(GetObjectMetadataRequest.class))).willReturn(objectMetadata);
    given(amazonS3.copyObject(any(CopyObjectRequest.class))).willReturn(new CopyObjectResult());
    // Listing returns two summaries: "foo" (size 1) and "bar" (size 2).
    ObjectListing objectListing = spy(new ObjectListing());
    List<S3ObjectSummary> s3ObjectSummaries = new LinkedList<>();
    S3ObjectSummary fileSummary1 = new S3ObjectSummary();
    fileSummary1.setBucketName(S3_BUCKET_NAME);
    fileSummary1.setKey(S3_FILE_KEY_FOO);
    fileSummary1.setSize(1);
    s3ObjectSummaries.add(fileSummary1);
    S3ObjectSummary fileSummary2 = new S3ObjectSummary();
    fileSummary2.setBucketName(S3_BUCKET_NAME);
    fileSummary2.setKey(S3_FILE_KEY_BAR);
    fileSummary2.setSize(2);
    s3ObjectSummaries.add(fileSummary2);
    given(objectListing.getObjectSummaries()).willReturn(s3ObjectSummaries);
    given(amazonS3.listObjects(any(ListObjectsRequest.class))).willReturn(objectListing);
    // First object: content "f" with a matching MD5-based ETag header.
    final S3Object file1 = new S3Object();
    file1.setBucketName(S3_BUCKET_NAME);
    file1.setKey(S3_FILE_KEY_FOO);
    try {
        byte[] data = "f".getBytes(StringUtils.UTF8);
        byte[] md5 = Md5Utils.computeMD5Hash(data);
        file1.getObjectMetadata().setHeader(Headers.ETAG, BinaryUtils.toHex(md5));
        S3ObjectInputStream content = new S3ObjectInputStream(new ByteArrayInputStream(data), mock(HttpRequestBase.class));
        file1.setObjectContent(content);
    } catch (Exception e) {
        // ignored: hashing a tiny constant byte array is not expected to fail
    }
    // Second object: content "bb" with a matching MD5-based ETag header.
    final S3Object file2 = new S3Object();
    file2.setBucketName(S3_BUCKET_NAME);
    file2.setKey(S3_FILE_KEY_BAR);
    try {
        byte[] data = "bb".getBytes(StringUtils.UTF8);
        byte[] md5 = Md5Utils.computeMD5Hash(data);
        file2.getObjectMetadata().setHeader(Headers.ETAG, BinaryUtils.toHex(md5));
        S3ObjectInputStream content = new S3ObjectInputStream(new ByteArrayInputStream(data), mock(HttpRequestBase.class));
        file2.setObjectContent(content);
    } catch (Exception e) {
        // ignored: hashing a tiny constant byte array is not expected to fail
    }
    // getObject dispatches on the requested key; unknown keys fall through to the real method.
    willAnswer( invocation -> {
        GetObjectRequest getObjectRequest = (GetObjectRequest) invocation.getArguments()[0];
        String key = getObjectRequest.getKey();
        if (S3_FILE_KEY_FOO.equals(key)) {
            return file1;
        } else if (S3_FILE_KEY_BAR.equals(key)) {
            return file2;
        } else {
            return invocation.callRealMethod();
        }
    }).given(amazonS3).getObject(any(GetObjectRequest.class));
    // setObjectAcl releases the test latch so the caller can await the ACL call.
    willAnswer( invocation -> {
        aclLatch().countDown();
        return null;
    }).given(amazonS3).setObjectAcl(any(SetObjectAclRequest.class));
    return amazonS3;
}Example 13
| Project: crate-master File: FileReadingCollectorTest.java View source code |
@Test
public void testCollectWithOneSocketTimeout() throws Throwable {
    S3ObjectInputStream inputStream = mock(S3ObjectInputStream.class);
    // Fixed matcher usage: the original stubbed read(new byte[anyInt()], anyInt(), anyByte()),
    // which mixes a raw value (a concrete byte[] built from anyInt(), i.e. byte[0]) with
    // matchers - invalid in Mockito - and used anyByte() for the int length parameter.
    // Every argument now uses a matcher of the correct type.
    when(inputStream.read(any(byte[].class), anyInt(), anyInt()))
        .thenAnswer(new WriteBufferAnswer(new byte[] { 102, 111, 111, 10 }))  // first line: foo
        .thenThrow(new SocketTimeoutException())                              // exception causes retry
        .thenAnswer(new WriteBufferAnswer(new byte[] { 102, 111, 111, 10 }))  // first line again, because of retry
        .thenAnswer(new WriteBufferAnswer(new byte[] { 98, 97, 114, 10 }))    // second line: bar
        .thenReturn(-1);
    TestingBatchConsumer consumer = getObjects(Collections.singletonList("s3://fakebucket/foo"), null, inputStream);
    Bucket rows = consumer.getBucket();
    // The timeout is retried transparently, so both lines are collected exactly once.
    assertThat(rows.size(), is(2));
    assertThat(TestingHelpers.printedTable(rows), is("foo\nbar\n"));
}
Example 14
| Project: eucalyptus-master File: AWSCloudFormationWaitConditionResourceAction.java View source code |
/**
 * Checks whether this wait condition has received enough success signals,
 * either via the resource's CreationPolicy signal store or, in the legacy
 * path, by scanning signed JSON signal documents uploaded to the handle's
 * S3 bucket/key. Throws to fail the resource or to request a retry.
 */
@Override
public ResourceAction perform(ResourceAction resourceAction) throws Exception {
    LOG.trace("Checking for signals");
    AWSCloudFormationWaitConditionResourceAction action = (AWSCloudFormationWaitConditionResourceAction) resourceAction;
    CreationPolicy creationPolicy = CreationPolicy.parse(action.info.getCreationPolicyJson());
    if (creationPolicy != null && creationPolicy.getResourceSignal() != null) {
        // check for signals
        Collection<SignalEntity> signals = SignalEntityManager.getSignals(action.getStackEntity().getStackId(), action.info.getAccountId(), action.info.getLogicalResourceId(), action.getStackEntity().getStackVersion());
        int numSuccessSignals = 0;
        if (signals != null) {
            for (SignalEntity signal : signals) {
                // Any FAILURE signal fails the resource immediately.
                if (signal.getStatus() == SignalEntity.Status.FAILURE) {
                    throw new ResourceFailureException("Received FAILURE signal with UniqueId " + signal.getUniqueId());
                }
                // Record each signal as a stack event exactly once.
                if (!signal.getProcessed()) {
                    StackEventEntityManager.addSignalStackEvent(signal);
                    signal.setProcessed(true);
                    SignalEntityManager.updateSignal(signal);
                }
                numSuccessSignals++;
            }
        }
        if (numSuccessSignals < creationPolicy.getResourceSignal().getCount()) {
            // Not enough signals yet: fail on timeout, otherwise ask to be retried.
            long durationMs = System.currentTimeMillis() - Long.valueOf(JsonHelper.getJsonNodeFromString(action.info.getEucaCreateStartTime()).asText());
            if (TimeUnit.MILLISECONDS.toSeconds(durationMs) > creationPolicy.getResourceSignal().getTimeout()) {
                throw new ResourceFailureException("Failed to receive " + creationPolicy.getResourceSignal().getCount() + " resource signal(s) within the specified duration");
            }
            throw new RetryAfterConditionCheckFailedException("Not enough success signals yet");
        }
        ObjectNode dataNode = JsonHelper.createObjectNode();
        action.info.setData(JsonHelper.getStringFromJsonNode(new TextNode(dataNode.toString())));
        return action;
    } else {
        // Legacy path: signals arrive as JSON documents uploaded to the handle's S3 location.
        if (action.properties.getTimeout() == null) {
            throw new ValidationErrorException("Timeout is a required field");
        }
        if (action.properties.getHandle() == null) {
            throw new ValidationErrorException("Handle is a required field");
        }
        // Default to a single required signal when Count is absent or non-positive.
        int numSignals = action.properties.getCount() != null && action.properties.getCount() > 0 ? action.properties.getCount() : 1;
        LOG.trace("num signals = " + numSignals);
        if (action.properties.getTimeout() > 43200) {
            throw new ValidationErrorException("timeout can not be more than 43200");
        }
        LOG.trace("Timeout = " + action.properties.getTimeout());
        BucketAndKey bucketAndKey = getBucketAndKey(action);
        String bucketName = bucketAndKey.getBucket();
        String keyName = bucketAndKey.getKey();
        ;
        boolean foundFailure = false;
        final Map<String, String> dataMap = Maps.newHashMap();
        try (final EucaS3Client s3c = EucaS3ClientFactory.getEucaS3Client(new CloudFormationAWSCredentialsProvider())) {
            LOG.trace("Handle:" + action.properties.getHandle());
            // Every version of the key is examined: each version is one signal document.
            VersionListing versionListing = s3c.listVersions(bucketName, "");
            LOG.trace("Found " + versionListing.getVersionSummaries() + " versions to check");
            for (S3VersionSummary versionSummary : versionListing.getVersionSummaries()) {
                LOG.trace("Key:" + versionSummary.getKey());
                if (!versionSummary.getKey().equals(keyName)) {
                    continue;
                }
                LOG.trace("Getting version: " + versionSummary.getVersionId());
                try {
                    GetObjectRequest getObjectRequest = new GetObjectRequest(bucketName, keyName, versionSummary.getVersionId());
                    S3Object s3Object = s3c.getObject(getObjectRequest);
                    JsonNode jsonNode = null;
                    try (S3ObjectInputStream s3ObjectInputStream = s3Object.getObjectContent()) {
                        // Cap how much of each signal object is read; the limit is
                        // overridable via a system property.
                        long maxLength = DEFAULT_MAX_LENGTH_WAIT_CONDITION_SIGNAL;
                        try {
                            maxLength = Long.parseLong(System.getProperty("cloudformation.max_length_wait_condition_signal"));
                        } catch (Exception ignore) {
                            // ignored: fall back to the default limit when the property is unset or unparsable
                        }
                        jsonNode = Json.parse(ByteStreams.limit(s3ObjectInputStream, maxLength));
                    }
                    if (!jsonNode.isObject()) {
                        LOG.trace("Read object, json but not object..skipping file");
                        continue;
                    }
                    // A valid signal document carries Status, Data and UniqueId fields.
                    ObjectNode localObjectNode = (ObjectNode) jsonNode;
                    String status = localObjectNode.get("Status").asText();
                    if (status == null) {
                        LOG.trace("Null status, skipping");
                        continue;
                    }
                    String data = localObjectNode.get("Data").asText();
                    if (data == null) {
                        LOG.trace("Null data, skipping");
                        continue;
                    }
                    String uniqueId = localObjectNode.get("UniqueId").asText();
                    if (uniqueId == null) {
                        LOG.trace("Null uniqueId, skipping");
                        continue;
                    }
                    if ("FAILURE".equals(status)) {
                        foundFailure = true;
                        LOG.trace("found failure, gonna die");
                        break;
                    } else if (!"SUCCESS".equals(status)) {
                        LOG.trace("weird status...skipping");
                        continue;
                    } else {
                        // Keyed by UniqueId, so duplicate signals count only once.
                        LOG.trace("found success, uniqueId=" + uniqueId);
                        dataMap.put(uniqueId, data);
                    }
                } catch (Exception ex) {
                    LOG.error(ex, ex);
                    LOG.trace("Exception while going through the objects, will skip this one.");
                }
            }
        }
        if (foundFailure) {
            throw new ResourceFailureException("Found failure signal");
        }
        LOG.trace("Have " + dataMap.size() + " success signals, need " + numSignals);
        if (dataMap.size() >= numSignals) {
            LOG.trace("Success");
            ObjectNode dataNode = JsonHelper.createObjectNode();
            for (String uniqueId : dataMap.keySet()) {
                dataNode.put(uniqueId, dataMap.get(uniqueId));
            }
            action.info.setData(JsonHelper.getStringFromJsonNode(new TextNode(dataNode.toString())));
            return action;
        } else {
            // Not enough signals yet: fail on timeout, otherwise ask to be retried.
            long durationMs = System.currentTimeMillis() - Long.valueOf(JsonHelper.getJsonNodeFromString(action.info.getEucaCreateStartTime()).asText());
            if (TimeUnit.MILLISECONDS.toSeconds(durationMs) > action.properties.getTimeout()) {
                throw new ResourceFailureException("Timeout exeeded waiting for success signals");
            }
            throw new RetryAfterConditionCheckFailedException("Not enough success signals yet");
        }
    }
}Example 15
| Project: imhotep-master File: S3RemoteFileSystem.java View source code |
/**
 * Opens a stream over an S3 object, optionally restricted to a byte range.
 *
 * @param fullPath      mounted path of the file; translated to an S3 key
 * @param startOffset   first byte to read
 * @param maxReadLength number of bytes to read, or -1 for the whole object
 * @return a stream that aborts the underlying S3 connection when discarded early
 * @throws IOException wrapping any AmazonS3Exception from the service call
 */
@Override
public InputStream getInputStreamForFile(String fullPath, long startOffset, long maxReadLength) throws IOException {
    final String relativePath = mounter.getMountRelativePath(fullPath, mountPoint);
    final String s3path = getS3path(relativePath);
    final GetObjectRequest request = new GetObjectRequest(s3bucket, s3path);
    if (maxReadLength != -1) {
        // S3/HTTP byte ranges are inclusive on both ends, so requesting exactly
        // maxReadLength bytes means the last byte index is start + length - 1.
        // The previous end bound (startOffset + maxReadLength) fetched one extra byte.
        request.setRange(startOffset, startOffset + maxReadLength - 1);
    }
    try {
        final S3Object s3obj = client.getObject(request);
        final S3ObjectInputStream is = s3obj.getObjectContent();
        return new AutoAbortingS3InputStream(is, maxReadLength);
    } catch (AmazonS3Exception e) {
        // Preserve the AWS exception as the cause; callers only see IOException.
        throw new IOException(e);
    }
}
Example 16
| Project: datacollector-master File: TestAmazonS3Target.java View source code |
@Test
public void testWriteTextData() throws Exception {
    final String prefix = "testWriteTextData";
    String suffix = "txt";
    final AmazonS3Target target = createS3targetWithTextData(prefix, false, suffix);
    final TargetRunner runner = new TargetRunner.Builder(AmazonS3DTarget.class, target).build();
    runner.runInit();
    final List<Record> records = TestUtil.createStringRecords();
    // The prefix must be empty before the target writes anything.
    ObjectListing listing = s3client.listObjects(BUCKET_NAME, prefix);
    Assert.assertTrue(listing.getObjectSummaries().isEmpty());
    runner.runWrite(records);
    runner.runDestroy();
    // Exactly one object should now exist under the prefix.
    listing = s3client.listObjects(BUCKET_NAME, prefix);
    Assert.assertEquals(1, listing.getObjectSummaries().size());
    // Every key must end with "." followed by the configured suffix.
    suffix = "." + suffix;
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        Assert.assertTrue(summary.getKey().endsWith(suffix));
    }
    // Download the single object and verify its nine lines of content.
    final S3ObjectSummary summary = listing.getObjectSummaries().get(0);
    final S3Object object = s3client.getObject(BUCKET_NAME, summary.getKey());
    final S3ObjectInputStream content = object.getObjectContent();
    final List<String> lines = IOUtils.readLines(content);
    Assert.assertEquals(9, lines.size());
    for (int i = 0; i < 9; i++) {
        Assert.assertEquals(TestUtil.TEST_STRING + i, lines.get(i));
    }
}
Example 17
| Project: geowebcache-master File: S3Ops.java View source code |
/**
 * Fetches the full contents of the object stored under {@code key}.
 *
 * @param key the object key
 * @return the object's bytes, or {@code null} if no such object exists
 * @throws StorageException if reading the object stream fails
 */
@Nullable
public byte[] getBytes(String key) throws StorageException {
    final S3Object object = getObject(key);
    if (object == null) {
        return null;
    }
    // try-with-resources guarantees the S3 stream is released even on failure.
    try (S3ObjectInputStream stream = object.getObjectContent()) {
        return IOUtils.toByteArray(stream);
    } catch (IOException e) {
        throw new StorageException("Error getting " + key, e);
    }
}
Example 18
| Project: presto-master File: PrestoS3FileSystem.java View source code |
/**
 * Releases the current input stream, if any, and records the freed connection.
 * For S3 streams, {@code abort()} is used instead of {@code close()} because
 * close() would drain all remaining bytes of the object just to return the
 * HTTP connection to the pool.
 */
private void closeStream() {
    if (in != null) {
        try {
            if (in instanceof S3ObjectInputStream) {
                ((S3ObjectInputStream) in).abort();
            } else {
                in.close();
            }
        } catch (IOException | AbortedException ignored) {
            // Best-effort cleanup: the stream is being discarded either way.
            // (Original text was garbled: "catch (IOExceptionAbortedException | ignored)".)
        }
        in = null;
        STATS.connectionReleased();
    }
}
Example 19
| Project: nfscan-master File: S3Retrieve.java View source code |
/**
 * Opens a buffered stream over the contents of an S3 object.
 * <p>
 * This does not buffer the whole object in memory: bytes stream directly
 * from Amazon S3. Callers must close the returned stream promptly; leaving
 * it open holds an HTTP connection and can block the request pool.
 *
 * @param bucketName name of the bucket holding the object
 * @param key        key of the object to download
 * @return a buffered stream over the object's contents
 */
protected BufferedInputStream startDownload(String bucketName, String key) {
    final S3Object s3Object = amazonS3.getObject(bucketName, key);
    final S3ObjectInputStream contentStream = s3Object.getObjectContent();
    return new BufferedInputStream(contentStream);
}
Example 20
| Project: flags-master File: S3StateRepositoryTest.java View source code |
/**
 * Test stub: stores the given string as an S3 object in the in-memory
 * bucket map instead of calling the real service.
 *
 * @param bucketName bucket to store into (must already exist in {@code repo})
 * @param key        object key
 * @param content    object body, encoded as UTF-8
 * @return an empty {@link PutObjectResult}, mirroring the real client
 */
@Override
public PutObjectResult putObject(String bucketName, String key, String content) {
    Map<String, S3Object> r2 = repo.get(bucketName);
    // Encode with an explicit charset: the no-arg getBytes() silently uses the
    // platform default, making the stored bytes vary across environments.
    ByteArrayInputStream in = new ByteArrayInputStream(content.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    S3Object s3obj = new S3Object();
    s3obj.setObjectContent(new S3ObjectInputStream(in, null));
    r2.put(key, s3obj);
    return new PutObjectResult();
}
Example 21
| Project: togglz-master File: S3StateRepositoryTest.java View source code |
/**
 * Test stub: stores the given string as an S3 object in the in-memory
 * bucket map instead of calling the real service.
 *
 * @param bucketName bucket to store into (must already exist in {@code repo})
 * @param key        object key
 * @param content    object body, encoded as UTF-8
 * @return an empty {@link PutObjectResult}, mirroring the real client
 */
@Override
public PutObjectResult putObject(String bucketName, String key, String content) {
    Map<String, S3Object> r2 = repo.get(bucketName);
    // Encode with an explicit charset: the no-arg getBytes() silently uses the
    // platform default, making the stored bytes vary across environments.
    ByteArrayInputStream in = new ByteArrayInputStream(content.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    S3Object s3obj = new S3Object();
    s3obj.setObjectContent(new S3ObjectInputStream(in, null));
    r2.put(key, s3obj);
    return new PutObjectResult();
}
Example 22
| Project: cloudstack-master File: S3Utils.java View source code |
// Note that whenever S3ObjectInputStream is returned, client code needs to close the internal stream to avoid resource leak. public static S3ObjectInputStream getObjectStream(final ClientOptions clientOptions, final String bucketName, final String key) { LOGGER.debug(format("Get S3ObjectInputStream from S3 Object %1$s in bucket %2$s", key, bucketName)); return getTransferManager(clientOptions).getAmazonS3Client().getObject(bucketName, key).getObjectContent(); }