Java Examples for com.amazonaws.services.s3.transfer.TransferManager

The following Java examples will help you understand how to use com.amazonaws.services.s3.transfer.TransferManager. These source code samples are taken from different open-source projects.

Example 1
Project: micro-server-master  File: S3UploadSystemTest.java View source code
/**
 * Builds a {@link TransferManager} whose credentials are read lazily from the
 * {@code s3.accessKey} and {@code s3.secretKey} system properties each time
 * the SDK asks for them.
 *
 * <p>NOTE(review): {@link System#getProperty} returns {@code null} when a
 * property is unset — confirm the test harness always provides both.
 *
 * @return a new TransferManager backed by system-property credentials
 */
private static TransferManager createManager() {
    return new TransferManager(new AWSCredentials() {

        @Override
        public String getAWSAccessKeyId() {
            return System.getProperty("s3.accessKey");
        }

        @Override
        public String getAWSSecretKey() {
            return System.getProperty("s3.secretKey");
        }
    });
}
Example 2
Project: gradle-aws-plugin-master  File: AmazonS3ProgressiveFileUploadTask.java View source code
/**
 * Gradle task action: uploads {@code file} to {@code s3://bucketName/key},
 * logging percentage progress, then records the resulting object URL via
 * {@code setResourceUrl}.
 *
 * @throws InterruptedException if the wait for completion is interrupted
 * @throws GradleException if bucketName, key or file is missing, or file is
 *         not a regular file
 */
@TaskAction
public void upload() throws InterruptedException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();
    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }
    if (key == null) {
        throw new GradleException("key is not specified");
    }
    if (file == null) {
        throw new GradleException("file is not specified");
    }
    if (!file.isFile()) {
        throw new GradleException("file must be regular file");
    }
    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();
    TransferManager s3mgr = TransferManagerBuilder.standard().withS3Client(s3).build();
    try {
        getLogger().info("Uploading... s3://{}/{}", bucketName, key);
        // Reuse the locals validated above instead of re-invoking the getters.
        Upload upload = s3mgr.upload(new PutObjectRequest(bucketName, key, file).withMetadata(getObjectMetadata()));
        upload.addProgressListener(new ProgressListener() {

            public void progressChanged(ProgressEvent event) {
                getLogger().info("  {}% uploaded", upload.getProgress().getPercentTransferred());
            }
        });
        upload.waitForCompletion();
    } finally {
        // This TransferManager is created per task invocation; shut down its
        // thread pool but keep the shared AmazonS3 client (owned by the plugin
        // extension) alive.
        s3mgr.shutdownNow(false);
    }
    setResourceUrl(s3.getUrl(bucketName, key).toString());
    getLogger().info("Upload completed: {}", getResourceUrl());
}
Example 3
Project: alluxio-master  File: S3AUnderFileSystem.java View source code
/**
   * Constructs a new instance of {@link S3AUnderFileSystem}.
   *
   * @param uri the {@link AlluxioURI} for this UFS
   * @param conf the configuration for this UFS
   * @return the created {@link S3AUnderFileSystem} instance
   */
public static S3AUnderFileSystem createInstance(AlluxioURI uri, UnderFileSystemConfiguration conf) {
    String bucketName = UnderFileSystemUtils.getBucketName(uri);
    // Set the aws credential system properties based on Alluxio properties, if they are set
    if (conf.containsKey(PropertyKey.S3A_ACCESS_KEY)) {
        System.setProperty(SDKGlobalConfiguration.ACCESS_KEY_SYSTEM_PROPERTY, conf.getValue(PropertyKey.S3A_ACCESS_KEY));
    }
    if (conf.containsKey(PropertyKey.S3A_SECRET_KEY)) {
        System.setProperty(SDKGlobalConfiguration.SECRET_KEY_SYSTEM_PROPERTY, conf.getValue(PropertyKey.S3A_SECRET_KEY));
    }
    // Checks, in order, env variables, system properties, profile file, and instance profile
    AWSCredentialsProvider credentials = new AWSCredentialsProviderChain(new DefaultAWSCredentialsProviderChain());
    // Set the client configuration based on Alluxio configuration values
    ClientConfiguration clientConf = new ClientConfiguration();
    // Socket timeout
    clientConf.setSocketTimeout(Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3A_SOCKET_TIMEOUT_MS)));
    // HTTP protocol
    if (Boolean.parseBoolean(conf.getValue(PropertyKey.UNDERFS_S3A_SECURE_HTTP_ENABLED))) {
        clientConf.setProtocol(Protocol.HTTPS);
    } else {
        clientConf.setProtocol(Protocol.HTTP);
    }
    // Proxy host
    if (conf.containsKey(PropertyKey.UNDERFS_S3_PROXY_HOST)) {
        clientConf.setProxyHost(conf.getValue(PropertyKey.UNDERFS_S3_PROXY_HOST));
    }
    // Proxy port
    if (conf.containsKey(PropertyKey.UNDERFS_S3_PROXY_PORT)) {
        clientConf.setProxyPort(Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_PROXY_PORT)));
    }
    int numAdminThreads = Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_ADMIN_THREADS_MAX));
    int numTransferThreads = Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_UPLOAD_THREADS_MAX));
    int numThreads = Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_THREADS_MAX));
    if (numThreads < numAdminThreads + numTransferThreads) {
        // Fix: the original logged the bare format string — the three {}
        // placeholders were never filled in. Supply the actual values.
        LOG.warn("Configured s3 max threads: {} is less than # admin threads: {} plus transfer " + "threads {}. Using admin threads + transfer threads as max threads instead.", numThreads, numAdminThreads, numTransferThreads);
        numThreads = numAdminThreads + numTransferThreads;
    }
    clientConf.setMaxConnections(numThreads);
    // Set client request timeout for all requests since multipart copy is used, and copy parts can
    // only be set with the client configuration.
    clientConf.setRequestTimeout(Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3A_REQUEST_TIMEOUT)));
    if (conf.containsKey(PropertyKey.UNDERFS_S3A_SIGNER_ALGORITHM)) {
        clientConf.setSignerOverride(conf.getValue(PropertyKey.UNDERFS_S3A_SIGNER_ALGORITHM));
    }
    AmazonS3Client amazonS3Client = new AmazonS3Client(credentials, clientConf);
    // Set a custom endpoint.
    if (conf.containsKey(PropertyKey.UNDERFS_S3_ENDPOINT)) {
        amazonS3Client.setEndpoint(conf.getValue(PropertyKey.UNDERFS_S3_ENDPOINT));
    }
    // Disable DNS style buckets, this enables path style requests.
    if (Boolean.parseBoolean(conf.getValue(PropertyKey.UNDERFS_S3_DISABLE_DNS_BUCKETS))) {
        S3ClientOptions clientOptions = S3ClientOptions.builder().setPathStyleAccess(true).build();
        amazonS3Client.setS3ClientOptions(clientOptions);
    }
    ExecutorService service = ExecutorServiceFactories.fixedThreadPoolExecutorServiceFactory("alluxio-s3-transfer-manager-worker", numTransferThreads).create();
    TransferManager transferManager = new TransferManager(amazonS3Client, service);
    TransferManagerConfiguration transferConf = new TransferManagerConfiguration();
    transferConf.setMultipartCopyThreshold(MULTIPART_COPY_THRESHOLD);
    transferManager.setConfiguration(transferConf);
    // Default to readable and writable by the user.
    // NOTE(review): this is decimal 700, not octal 0700 — confirm it matches the
    // permission encoding expected by S3AUtils.translateBucketAcl before changing.
    short bucketMode = (short) 700;
    // There is no known account owner by default.
    String accountOwner = "";
    // if ACL enabled inherit bucket acl for all the objects.
    if (Boolean.parseBoolean(conf.getValue(PropertyKey.UNDERFS_S3A_INHERIT_ACL))) {
        String accountOwnerId = amazonS3Client.getS3AccountOwner().getId();
        // Gets the owner from user-defined static mapping from S3 canonical user
        // id to Alluxio user name.
        String owner = CommonUtils.getValueFromStaticMapping(conf.getValue(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING), accountOwnerId);
        // If there is no user-defined mapping, use the display name.
        if (owner == null) {
            owner = amazonS3Client.getS3AccountOwner().getDisplayName();
        }
        accountOwner = owner == null ? accountOwnerId : owner;
        AccessControlList acl = amazonS3Client.getBucketAcl(bucketName);
        bucketMode = S3AUtils.translateBucketAcl(acl, accountOwnerId);
    }
    return new S3AUnderFileSystem(uri, amazonS3Client, bucketName, bucketMode, accountOwner, transferManager, conf);
}
Example 4
Project: aws-java-sdk-master  File: UploadCallable.java View source code
/**
     * Uploads all parts in the request in serial in this thread, then completes
     * the upload and returns the result.
     */
private UploadResult uploadPartsInSeries(UploadPartRequestFactory requestFactory) {
    final List<PartETag> partETags = new ArrayList<PartETag>();
    while (requestFactory.hasMoreRequests()) {
        if (threadPool.isShutdown()) {
            throw new CancellationException("TransferManager has been shutdown");
        }
        UploadPartRequest partRequest = requestFactory.getNextUploadPartRequest();
        // Mark the stream (when supported) in case it needs to be reset later.
        // mark() takes an int read limit, so clamp part sizes >= Integer.MAX_VALUE.
        InputStream partStream = partRequest.getInputStream();
        if (partStream != null && partStream.markSupported()) {
            long partSize = partRequest.getPartSize();
            partStream.mark(partSize >= Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) partSize);
        }
        partETags.add(s3.uploadPart(partRequest).getPartETag());
    }
    // Complete the multipart upload, carrying over requester-pays, progress
    // listener and metric collector settings from the original request.
    CompleteMultipartUploadRequest completeRequest =
            new CompleteMultipartUploadRequest(origReq.getBucketName(), origReq.getKey(), multipartUploadId, partETags)
                    .withRequesterPays(origReq.isRequesterPays())
                    .withGeneralProgressListener(origReq.getGeneralProgressListener())
                    .withRequestMetricCollector(origReq.getRequestMetricCollector());
    CompleteMultipartUploadResult completion = s3.completeMultipartUpload(completeRequest);
    UploadResult result = new UploadResult();
    result.setBucketName(completion.getBucketName());
    result.setKey(completion.getKey());
    result.setETag(completion.getETag());
    result.setVersionId(completion.getVersionId());
    return result;
}
Example 5
Project: aws-sdk-java-master  File: UploadCallable.java View source code
/**
     * Uploads all parts in the request in serial in this thread, then completes
     * the upload and returns the result.
     */
private UploadResult uploadPartsInSeries(UploadPartRequestFactory requestFactory) {
    final List<PartETag> partETags = new ArrayList<PartETag>();
    while (requestFactory.hasMoreRequests()) {
        // Bail out promptly if the owning TransferManager was shut down mid-upload.
        if (threadPool.isShutdown())
            throw new CancellationException("TransferManager has been shutdown");
        UploadPartRequest uploadPartRequest = requestFactory.getNextUploadPartRequest();
        // Mark the stream in case we need to reset it
        InputStream inputStream = uploadPartRequest.getInputStream();
        if (inputStream != null && inputStream.markSupported()) {
            // mark() takes an int read limit; clamp part sizes >= Integer.MAX_VALUE.
            if (uploadPartRequest.getPartSize() >= Integer.MAX_VALUE) {
                inputStream.mark(Integer.MAX_VALUE);
            } else {
                inputStream.mark((int) uploadPartRequest.getPartSize());
            }
        }
        partETags.add(s3.uploadPart(uploadPartRequest).getPartETag());
    }
    // Complete the multipart upload, carrying over requester-pays, progress
    // listener and metric collector settings from the original request.
    CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest(origReq.getBucketName(), origReq.getKey(), multipartUploadId, partETags).withRequesterPays(origReq.isRequesterPays()).withGeneralProgressListener(origReq.getGeneralProgressListener()).withRequestMetricCollector(origReq.getRequestMetricCollector());
    CompleteMultipartUploadResult res = s3.completeMultipartUpload(req);
    // Copy the service response into the SDK-level UploadResult value object.
    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(res.getBucketName());
    uploadResult.setKey(res.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());
    return uploadResult;
}
Example 6
Project: aws-toolkit-eclipse-master  File: TestToolManager.java View source code
/**
     * Download the given object from S3 to the given file, updating the
     * given progress monitor periodically.
     *
     * @param key The key of the object to download.
     * @param destination The destination file to download to.
     * @param monitor The progress monitor to update.
     */
private void download(final String key, final File destination, final IProgressMonitor monitor) {
    try {
        TransferManager tm = getTransferManager();
        Download download = tm.download(TEST_TOOL_BUCKET, key, destination);
        // NOTE(review): the int cast truncates for objects >= 2 GiB — confirm
        // the downloaded archive is always smaller than that.
        int totalWork = (int) download.getProgress().getTotalBytesToTransfer();
        monitor.beginTask("Downloading DynamoDB Local", totalWork);
        int worked = 0;
        // Poll the transfer every 500 ms and report only the newly-transferred
        // delta to the monitor.
        while (!download.isDone()) {
            int bytes = (int) download.getProgress().getBytesTransferred();
            if (bytes > worked) {
                int newWork = bytes - worked;
                monitor.worked(newWork);
                worked = bytes;
            }
            Thread.sleep(500);
        }
    } catch (InterruptedException exception) {
        // Restore the interrupt flag before surfacing the failure.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted while installing DynamoDB Local", exception);
    } catch (AmazonServiceException exception) {
        throw new RuntimeException("Error downloading DynamoDB Local: " + exception.getMessage(), exception);
    }
}
Example 7
Project: SecureShareLib-master  File: S3SiteController.java View source code
/**
 * Background task: uploads the media file at {@code mediaPaths[0]} to the
 * configured bucket under {@code pathPrefix + fileName}.
 *
 * @param mediaPaths first element is the local media file path
 * @return the upload result, or {@code null} if validation or the upload failed
 *         (failures are reported via {@code jobFailed})
 */
@Override
protected UploadResult doInBackground(String... mediaPaths) {
    UploadResult result = null;
    if (null == mediaPaths[0]) {
        jobFailed(null, 7000000, "S3 media path is null");
        return result;
    }
    File mediaFile = new File(mediaPaths[0]);
    if (!mediaFile.exists()) {
        jobFailed(null, 7000001, "S3 media path invalid");
        return result;
    }
    TransferManager transferManager = null;
    try {
        final AWSCredentials credentials = new BasicAWSCredentials(mContext.getString(R.string.s3_key), mContext.getString(R.string.s3_secret));
        Log.i(TAG, "upload file: " + mediaFile.getName());
        AmazonS3Client s3Client = new AmazonS3Client(credentials, s3Config);
        transferManager = new TransferManager(s3Client);
        Upload upload = transferManager.upload(bucket, pathPrefix + mediaFile.getName(), mediaFile);
        result = upload.waitForUploadResult();
    } catch (Exception e) {
        // Fix: log the throwable itself so the stack trace is preserved
        // (the original logged only e.getMessage()).
        Timber.e(e, "upload error: %s", e.getMessage());
        jobFailed(null, 7000002, "S3 upload failed: " + e.getMessage());
    } finally {
        if (transferManager != null) {
            // The manager (and its thread pool) is created per invocation;
            // release it so worker threads don't accumulate.
            transferManager.shutdownNow();
        }
    }
    return result;
}
Example 8
Project: storm-s3-master  File: S3MemBufferedOutputStreamTest.java View source code
/**
 * Round-trip test: writes two lines through S3MemBufferedOutputStream into a
 * freshly created bucket, reads the single resulting object back, verifies its
 * contents, then deletes the object and bucket.
 *
 * @throws IOException if reading the object content back fails
 */
@Test
public void testStream() throws IOException {
    AWSCredentialsProvider provider = new ProfileCredentialsProvider("aws-testing");
    ClientConfiguration config = new ClientConfiguration();
    AmazonS3 client = new AmazonS3Client(provider.getCredentials(), config);
    // Timestamp suffix keeps concurrent test runs from colliding on bucket name.
    String bucketName = "test-bucket-" + System.currentTimeMillis();
    client.createBucket(bucketName);
    TransferManager tx = new TransferManager(client);
    try {
        Uploader uploader = new PutRequestUploader(tx.getAmazonS3Client());
        OutputStream outputStream = new S3MemBufferedOutputStream(uploader, bucketName, new DefaultFileNameFormat().withPrefix("test"), "text/plain");
        // try-with-resources guarantees the writer chain (and the underlying
        // S3 stream, which triggers the upload on close) is closed even if a
        // println fails.
        try (PrintWriter printer = new PrintWriter(new OutputStreamWriter(outputStream))) {
            printer.println("line1");
            printer.println("line2");
        }
        ObjectListing objectListing = client.listObjects(bucketName);
        List<S3ObjectSummary> objectSummaries = objectListing.getObjectSummaries();
        assertEquals(1, objectSummaries.size());
        S3ObjectSummary s3ObjectSummary = objectSummaries.get(0);
        // Close the object content stream even when an assertion fails, so the
        // HTTP connection is returned to the pool.
        try (BufferedReader r = new BufferedReader(new InputStreamReader(client.getObject(bucketName, s3ObjectSummary.getKey()).getObjectContent()))) {
            assertEquals("line1", r.readLine());
            assertEquals("line2", r.readLine());
        }
        client.deleteObject(bucketName, s3ObjectSummary.getKey());
        client.deleteBucket(bucketName);
    } finally {
        // Stop the TransferManager's worker threads; keep the shared client
        // alive (it is used for the cleanup calls above).
        tx.shutdownNow(false);
    }
}
Example 9
Project: tachyon-master  File: S3AUnderFileSystem.java View source code
/**
   * Constructs a new instance of {@link S3AUnderFileSystem}.
   *
   * @param uri the {@link AlluxioURI} for this UFS
   * @param conf the configuration for this UFS
   * @return the created {@link S3AUnderFileSystem} instance
   */
public static S3AUnderFileSystem createInstance(AlluxioURI uri, UnderFileSystemConfiguration conf) {
    String bucketName = UnderFileSystemUtils.getBucketName(uri);
    // Set the aws credential system properties based on Alluxio properties, if they are set
    if (conf.containsKey(PropertyKey.S3A_ACCESS_KEY)) {
        System.setProperty(SDKGlobalConfiguration.ACCESS_KEY_SYSTEM_PROPERTY, conf.getValue(PropertyKey.S3A_ACCESS_KEY));
    }
    if (conf.containsKey(PropertyKey.S3A_SECRET_KEY)) {
        System.setProperty(SDKGlobalConfiguration.SECRET_KEY_SYSTEM_PROPERTY, conf.getValue(PropertyKey.S3A_SECRET_KEY));
    }
    // Checks, in order, env variables, system properties, profile file, and instance profile
    AWSCredentialsProvider credentials = new AWSCredentialsProviderChain(new DefaultAWSCredentialsProviderChain());
    // Set the client configuration based on Alluxio configuration values
    ClientConfiguration clientConf = new ClientConfiguration();
    // Socket timeout
    clientConf.setSocketTimeout(Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3A_SOCKET_TIMEOUT_MS)));
    // HTTP protocol
    if (Boolean.parseBoolean(conf.getValue(PropertyKey.UNDERFS_S3A_SECURE_HTTP_ENABLED))) {
        clientConf.setProtocol(Protocol.HTTPS);
    } else {
        clientConf.setProtocol(Protocol.HTTP);
    }
    // Proxy host
    if (conf.containsKey(PropertyKey.UNDERFS_S3_PROXY_HOST)) {
        clientConf.setProxyHost(conf.getValue(PropertyKey.UNDERFS_S3_PROXY_HOST));
    }
    // Proxy port
    if (conf.containsKey(PropertyKey.UNDERFS_S3_PROXY_PORT)) {
        clientConf.setProxyPort(Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_PROXY_PORT)));
    }
    int numAdminThreads = Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_ADMIN_THREADS_MAX));
    int numTransferThreads = Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_UPLOAD_THREADS_MAX));
    int numThreads = Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3_THREADS_MAX));
    if (numThreads < numAdminThreads + numTransferThreads) {
        // BUG(review): the message has three {} placeholders but no arguments are
        // passed — numThreads, numAdminThreads and numTransferThreads should be
        // supplied to LOG.warn.
        LOG.warn("Configured s3 max threads: {} is less than # admin threads: {} plus transfer " + "threads {}. Using admin threads + transfer threads as max threads instead.");
        numThreads = numAdminThreads + numTransferThreads;
    }
    clientConf.setMaxConnections(numThreads);
    // Set client request timeout for all requests since multipart copy is used, and copy parts can
    // only be set with the client configuration.
    clientConf.setRequestTimeout(Integer.parseInt(conf.getValue(PropertyKey.UNDERFS_S3A_REQUEST_TIMEOUT)));
    if (conf.containsKey(PropertyKey.UNDERFS_S3A_SIGNER_ALGORITHM)) {
        clientConf.setSignerOverride(conf.getValue(PropertyKey.UNDERFS_S3A_SIGNER_ALGORITHM));
    }
    AmazonS3Client amazonS3Client = new AmazonS3Client(credentials, clientConf);
    // Set a custom endpoint.
    if (conf.containsKey(PropertyKey.UNDERFS_S3_ENDPOINT)) {
        amazonS3Client.setEndpoint(conf.getValue(PropertyKey.UNDERFS_S3_ENDPOINT));
    }
    // Disable DNS style buckets, this enables path style requests.
    if (Boolean.parseBoolean(conf.getValue(PropertyKey.UNDERFS_S3_DISABLE_DNS_BUCKETS))) {
        S3ClientOptions clientOptions = S3ClientOptions.builder().setPathStyleAccess(true).build();
        amazonS3Client.setS3ClientOptions(clientOptions);
    }
    ExecutorService service = ExecutorServiceFactories.fixedThreadPoolExecutorServiceFactory("alluxio-s3-transfer-manager-worker", numTransferThreads).create();
    TransferManager transferManager = new TransferManager(amazonS3Client, service);
    TransferManagerConfiguration transferConf = new TransferManagerConfiguration();
    transferConf.setMultipartCopyThreshold(MULTIPART_COPY_THRESHOLD);
    transferManager.setConfiguration(transferConf);
    // Default to readable and writable by the user.
    // NOTE(review): this is decimal 700, not octal 0700 — confirm it matches the
    // permission encoding expected by S3AUtils.translateBucketAcl.
    short bucketMode = (short) 700;
    // There is no known account owner by default.
    String accountOwner = "";
    // if ACL enabled inherit bucket acl for all the objects.
    if (Boolean.parseBoolean(conf.getValue(PropertyKey.UNDERFS_S3A_INHERIT_ACL))) {
        String accountOwnerId = amazonS3Client.getS3AccountOwner().getId();
        // Gets the owner from user-defined static mapping from S3 canonical user
        // id to Alluxio user name.
        String owner = CommonUtils.getValueFromStaticMapping(conf.getValue(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING), accountOwnerId);
        // If there is no user-defined mapping, use the display name.
        if (owner == null) {
            owner = amazonS3Client.getS3AccountOwner().getDisplayName();
        }
        accountOwner = owner == null ? accountOwnerId : owner;
        AccessControlList acl = amazonS3Client.getBucketAcl(bucketName);
        bucketMode = S3AUtils.translateBucketAcl(acl, accountOwnerId);
    }
    return new S3AUnderFileSystem(uri, amazonS3Client, bucketName, bucketMode, accountOwner, transferManager, conf);
}
Example 10
Project: ambari-master  File: S3Uploader.java View source code
@VisibleForTesting
protected void uploadFileToS3(String bucketName, String s3Key, File localFile, String accessKey, String secretKey) {
    TransferManager transferManager = S3Util.getTransferManager(accessKey, secretKey);
    try {
        Upload upload = transferManager.upload(bucketName, s3Key, localFile);
        upload.waitForUploadResult();
    } catch (AmazonClientExceptionInterruptedException |  e) {
        LOG.error("s3 uploading failed for file :" + localFile.getAbsolutePath(), e);
    } finally {
        S3Util.shutdownTransferManager(transferManager);
    }
}
Example 11
Project: aws-sdk-for-android-master  File: MultipartUploadCallable.java View source code
/**
 * Performs a multipart upload of the request's data, parallelizing part
 * uploads when possible; aborts the multipart upload on failure and always
 * attempts to close the request's input stream.
 *
 * @return the result of the completed multipart upload
 * @throws Exception if any part upload or the completion call fails
 */
public UploadResult call() throws Exception {
    final String bucketName = putObjectRequest.getBucketName();
    final String key = putObjectRequest.getKey();
    fireProgressEvent(ProgressEvent.STARTED_EVENT_CODE);
    String uploadId = initiateMultipartUpload(putObjectRequest);
    long optimalPartSize = TransferManagerUtils.calculateOptimalPartSize(putObjectRequest, configuration);
    log.debug("Calculated optimal part size: " + optimalPartSize);
    try {
        final List<PartETag> partETags = new ArrayList<PartETag>();
        UploadPartRequestFactory requestFactory = new UploadPartRequestFactory(putObjectRequest, uploadId, optimalPartSize);
        if (TransferManagerUtils.isUploadParallelizable(putObjectRequest)) {
            // Parallel path: submit every part to the thread pool, then gather
            // the ETags via collectPartETags.
            List<Future<PartETag>> futures = new ArrayList<Future<PartETag>>();
            while (requestFactory.hasMoreRequests()) {
                if (threadPool.isShutdown())
                    throw new CancellationException("TransferManager has been shutdown");
                UploadPartRequest request = requestFactory.getNextUploadPartRequest();
                futures.add(threadPool.submit(new UploadPartCallable(s3, request)));
            }
            this.collectPartETags(futures, partETags);
        } else {
            // Serial path: upload parts one at a time on this thread.
            while (requestFactory.hasMoreRequests()) {
                if (threadPool.isShutdown())
                    throw new CancellationException("TransferManager has been shutdown");
                partETags.add(s3.uploadPart(requestFactory.getNextUploadPartRequest()).getPartETag());
            }
        }
        CompleteMultipartUploadResult completeMultipartUploadResult = s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags));
        fireProgressEvent(ProgressEvent.COMPLETED_EVENT_CODE);
        UploadResult uploadResult = new UploadResult();
        uploadResult.setBucketName(completeMultipartUploadResult.getBucketName());
        uploadResult.setKey(completeMultipartUploadResult.getKey());
        uploadResult.setETag(completeMultipartUploadResult.getETag());
        uploadResult.setVersionId(completeMultipartUploadResult.getVersionId());
        return uploadResult;
    } catch (Exception e) {
        fireProgressEvent(ProgressEvent.FAILED_EVENT_CODE);
        // Best-effort abort of the multipart upload; abort failures are only
        // logged so the original exception still propagates.
        try {
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, uploadId));
        } catch (Exception e2) {
            log.info("Unable to abort multipart upload, you may need to manually remove uploaded parts: " + e2.getMessage(), e2);
        }
        throw e;
    } finally {
        // Always try to close the request's input stream; close failures are
        // logged rather than thrown.
        if (putObjectRequest.getInputStream() != null) {
            try {
                putObjectRequest.getInputStream().close();
            } catch (Exception e) {
                log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
            }
        }
    }
}
Example 12
Project: BoxMeBackend-master  File: S3TransferProgressSample.java View source code
/**
 * Entry point: loads credentials from AwsCredentials.properties on the
 * classpath, creates the shared TransferManager, derives the bucket name from
 * the access key, then starts the sample by constructing
 * S3TransferProgressSample.
 *
 * <p>NOTE(review): the InputStream from getResourceAsStream is never closed,
 * and getResourceAsStream returns null if the file is missing — verify.
 */
public static void main(String[] args) throws Exception {
    credentials = new PropertiesCredentials(S3TransferProgressSample.class.getResourceAsStream("AwsCredentials.properties"));
    // TransferManager manages a pool of threads, so we create a
    // single instance and share it throughout our application.
    tx = new TransferManager(credentials);
    // Lowercased access-key suffix — presumably to satisfy S3 bucket-naming
    // rules and keep the bucket unique per account.
    bucketName = "s3-upload-sdk-sample-" + credentials.getAWSAccessKeyId().toLowerCase();
    new S3TransferProgressSample();
}
Example 13
Project: DeployMan-master  File: RemoteRepository.java View source code
/**
 * Blocks until {@code upload} completes, printing the total size up front,
 * attaching a SharpProgressListener configured with 1/50th of the total bytes,
 * and shutting the TransferManager down once the transfer finishes.
 */
private void waitForUpload(Transfer upload, TransferManager tm) throws AmazonServiceException, AmazonClientException, InterruptedException {
    long totalBytes = upload.getProgress().getTotalBytesToTransfer();
    //$NON-NLS-1$
    console.write(new Size(totalBytes) + " to upload");
    // Integer division, as in the original: one listener "step" per 1/50th.
    long stepBytes = totalBytes / 50;
    upload.addProgressListener(new SharpProgressListener(stepBytes));
    upload.waitForCompletion();
    tm.shutdownNow();
    //$NON-NLS-1$
    console.write("\nDone");
}
Example 14
Project: incubator-streams-master  File: S3OutputStreamWrapper.java View source code
/**
 * Uploads the in-memory buffer to S3 as {@code path + fileName}, attaching an
 * expiration time and user metadata. Upload failures are logged at TRACE and
 * swallowed (best effort); the stream and TransferManager are always released.
 *
 * @throws Exception if building the request or metadata fails
 */
private void addFile() throws Exception {
    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();
    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    // Expire roughly three years from now.
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);
    metadata.addUserMetadata("writer", "org.apache.streams");
    for (String s : metaData.keySet()) {
        metadata.addUserMetadata(s, metaData.get(s));
    }
    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
        upload.waitForUploadResult();
        LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
    } catch (Exception ignored) {
        // Deliberately best-effort: a failed upload must not break the caller.
        LOGGER.trace("Ignoring", ignored);
    } finally {
        // Fix: the original closed the stream and shut down the manager only on
        // the success path, leaking both whenever the upload failed.
        is.close();
        transferManager.shutdownNow(false);
    }
}
Example 15
Project: jackrabbit-master  File: S3Backend.java View source code
/**
 * Initializes the S3 backend: opens the S3 client, resolves (or creates) the
 * target bucket in the configured or EC2-derived region, sizes the transfer
 * and async-write thread pools from properties, and optionally renames legacy
 * keys.
 *
 * @param store the caching data store this backend serves
 * @param homeDir the data store home directory (unused in this method body)
 * @param prop configuration properties (bucket, region, thread counts, ...)
 * @throws DataStoreException if any step of initialization fails
 */
public void init(CachingDataStore store, String homeDir, Properties prop) throws DataStoreException {
    // Save the caller's context class loader; it is restored in the finally
    // block after this class's loader has been installed for the SDK calls.
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        startTime = new Date();
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        LOG.debug("init");
        setDataStore(store);
        s3ReqDecorator = new S3RequestDecorator(prop);
        s3service = Utils.openService(prop);
        // Fall back to the configured bucket property when no bucket was set.
        if (bucket == null || "".equals(bucket.trim())) {
            bucket = prop.getProperty(S3Constants.S3_BUCKET);
        }
        String region = prop.getProperty(S3Constants.S3_REGION);
        Region s3Region = null;
        if (StringUtils.isNullOrEmpty(region)) {
            // No region configured: derive it from the EC2 instance we are
            // running on, or fail if we are not on EC2.
            com.amazonaws.regions.Region ec2Region = Regions.getCurrentRegion();
            if (ec2Region != null) {
                s3Region = Region.fromValue(ec2Region.getName());
            } else {
                throw new AmazonClientException("parameter [" + S3Constants.S3_REGION + "] not configured and cannot be derived from environment");
            }
        } else {
            // Map the two specially-named regions, otherwise parse the value.
            if (Utils.DEFAULT_AWS_BUCKET_REGION.equals(region)) {
                s3Region = Region.US_Standard;
            } else if (Region.EU_Ireland.toString().equals(region)) {
                s3Region = Region.EU_Ireland;
            } else {
                s3Region = Region.fromValue(region);
            }
        }
        if (!s3service.doesBucketExist(bucket)) {
            s3service.createBucket(bucket, s3Region);
            LOG.info("Created bucket [{}] in [{}] ", bucket, region);
        } else {
            LOG.info("Using bucket [{}] in [{}] ", bucket, region);
        }
        // Transfer-manager pool size: property override, default 10.
        int writeThreads = 10;
        String writeThreadsStr = prop.getProperty(S3Constants.S3_WRITE_THREADS);
        if (writeThreadsStr != null) {
            writeThreads = Integer.parseInt(writeThreadsStr);
        }
        LOG.info("Using thread pool of [{}] threads in S3 transfer manager.", writeThreads);
        tmx = new TransferManager(s3service, (ThreadPoolExecutor) Executors.newFixedThreadPool(writeThreads, new NamedThreadFactory("s3-transfer-manager-worker")));
        // Async-write pool: maxConns minus the transfer threads, default 10.
        int asyncWritePoolSize = 10;
        String maxConnsStr = prop.getProperty(S3Constants.S3_MAX_CONNS);
        if (maxConnsStr != null) {
            asyncWritePoolSize = Integer.parseInt(maxConnsStr) - writeThreads;
        }
        setAsyncWritePoolSize(asyncWritePoolSize);
        String renameKeyProp = prop.getProperty(S3Constants.S3_RENAME_KEYS);
        boolean renameKeyBool = (renameKeyProp == null || "".equals(renameKeyProp)) ? false : Boolean.parseBoolean(renameKeyProp);
        LOG.info("Rename keys [{}]", renameKeyBool);
        if (renameKeyBool) {
            renameKeys();
        }
        LOG.debug("S3 Backend initialized in [{}] ms", +(System.currentTimeMillis() - startTime.getTime()));
    } catch (Exception e) {
        LOG.debug("  error ", e);
        // Wrap any failure, preserving the cause for diagnosis.
        throw new DataStoreException("Could not initialize S3 from " + prop, e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
Example 16
Project: load_web_tests_based_on_cloud_computing-master  File: S3TransferProgressSample.java View source code
/**
 * Entry point: loads credentials from AwsCredentials.properties on the
 * classpath, creates the shared TransferManager, derives the bucket name from
 * the access key, then starts the sample by constructing
 * S3TransferProgressSample.
 *
 * <p>NOTE(review): the InputStream from getResourceAsStream is never closed,
 * and getResourceAsStream returns null if the file is missing — verify.
 */
public static void main(String[] args) throws Exception {
    credentials = new PropertiesCredentials(S3TransferProgressSample.class.getResourceAsStream("AwsCredentials.properties"));
    // TransferManager manages a pool of threads, so we create a
    // single instance and share it throughout our application.
    tx = new TransferManager(credentials);
    // Lowercased access-key suffix — presumably to satisfy S3 bucket-naming
    // rules and keep the bucket unique per account.
    bucketName = "s3-upload-sdk-sample-" + credentials.getAWSAccessKeyId().toLowerCase();
    new S3TransferProgressSample();
}
Example 17
Project: march4-master  File: S3TransferProgressSample.java View source code
/**
 * Entry point: loads AWS credentials from the classpath, points the S3 client
 * at us-west-2, creates the shared TransferManager and the sample bucket name,
 * then starts the sample by constructing S3TransferProgressSample.
 */
public static void main(String[] args) throws Exception {
    /*
         * This credentials provider implementation loads your AWS credentials
         * from a properties file at the root of your classpath.
         *
         * TransferManager manages a pool of threads, so we create a
         * single instance and share it throughout our application.
         */
    credentials = new ClasspathPropertiesFileCredentialsProvider().getCredentials();
    AmazonS3 s3 = new AmazonS3Client(credentials);
    s3.setRegion(Region.getRegion(Regions.US_WEST_2));
    tx = new TransferManager(s3);
    bucketName = "s3-upload-sdk-sample-" + credentials.getAWSAccessKeyId().toLowerCase();
    new S3TransferProgressSample();
}
Example 18
Project: NemakiWare-master  File: BackupCouchDbToS3Util.java View source code
/**
 * Dumps each target CouchDB database (or copies its raw .couch file) and
 * uploads the result to the given S3 bucket.
 *
 * @param bucketName  destination S3 bucket
 * @param couchUrl    CouchDB base URL (http/https) or a file:// location holding .couch files
 * @param profileName AWS credentials profile used for the upload
 * @param targets     database names to back up; null or empty means "all databases"
 */
public void backup(String bucketName, URI couchUrl, String profileName, String[] targets) {
    String[] targetDbs = targets;
    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider(profileName));
    TransferManager tm = new TransferManager(s3client);
    try {
        String uriScheme = couchUrl.getScheme();
        if (targetDbs == null || targetDbs.length == 0) {
            // No explicit list: enumerate every database via the CouchDB REST API.
            URI targetRestURL = couchUrl;
            if (uriScheme.equals("file")) {
                try {
                    targetRestURL = new URI(DefaultCouchDbUrl);
                } catch (URISyntaxException e) {
                    // FIX: don't swallow silently — DefaultCouchDbUrl is a constant,
                    // so a parse failure here is a configuration bug worth reporting.
                    System.err.println("Invalid default CouchDB URL: " + e.getMessage());
                }
            }
            List<String> list = getAllTargets(targetRestURL);
            // FIX: redundant (String[]) cast removed; toArray(new String[0]) is already typed.
            targetDbs = list.toArray(new String[0]);
        }
        for (String repositoryName : targetDbs) {
            System.out.println("[" + repositoryName + "] Backup started. ");
            // FIX: the original used == "" which compares identity, not content,
            // and never matches a non-interned empty string.
            if (repositoryName == null || repositoryName.isEmpty())
                continue;
            File file;
            // FIX: resolve into a local variable instead of mutating the couchUrl
            // parameter on every loop iteration.
            URI sourceUrl = couchUrl;
            try {
                if (uriScheme.equals("http") || uriScheme.equals("https")) {
                    // Remote CouchDB: dump the database into a temp file first.
                    file = File.createTempFile(repositoryName, ".bk.dump");
                    String couchURI = couchUrl.toString();
                    DumpAction action = DumpAction.getInstance(couchURI, repositoryName, file, false);
                    action.dump();
                } else if (uriScheme.equals("file")) {
                    // Local install: upload the raw .couch file directly.
                    sourceUrl = couchUrl.resolve(repositoryName + ".couch");
                    file = new File(sourceUrl);
                } else {
                    System.out.printf("Error : Invalid scheme :  %s \n", uriScheme);
                    continue;
                }
                if (file.exists()) {
                    uploadS3(tm, file, bucketName, repositoryName);
                } else {
                    System.out.println("Error : Backup file not found");
                    System.out.println(file.getPath());
                    System.out.println(sourceUrl.toString());
                    continue;
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    } finally {
        // true: also shut down the underlying S3 client (it is local to this method).
        tm.shutdownNow(true);
    }
}
Example 19
Project: seqware-master  File: ProvisionFilesUtil.java View source code
/**
     * Uploads data to S3 using the TransferManager (multipart-capable) API.
     * The target may embed credentials (s3://access:secret@bucket/key); otherwise
     * keys are read from the .seqware settings file.
     *
     * @param reader stream holding the data; closed unused when the local-file path is taken
     * @param output target URL, s3://&lt;bucket&gt;/&lt;key&gt; or s3://&lt;access&gt;:&lt;secret&gt;@&lt;bucket&gt;/&lt;key&gt;
     * @param fullOutputPath when true, the key is used exactly as given instead of appending fileName
     * @param connectionTimeout HTTP connection timeout passed to ClientConfiguration (milliseconds)
     * @param maxConnections maximum simultaneous HTTP connections for the S3 client
     * @param maxErrorRetry number of retries for retryable request failures
     * @param socketTimeout socket read timeout passed to ClientConfiguration (milliseconds)
     * @param decryptCipher optional cipher to decrypt the stream before upload; may be null
     * @param encryptCipher optional cipher to encrypt the stream before upload; may be null
     * @return true when the upload (and, where checked, the size verification) succeeded
     */
public boolean putToS3(InputStream reader, String output, boolean fullOutputPath, int connectionTimeout, int maxConnections, int maxErrorRetry, int socketTimeout, Cipher decryptCipher, Cipher encryptCipher) {
    // can encode the access key and secret key within the URL
    // see http://www.cs.rutgers.edu/~watrous/user-pass-url.html
    Pattern p = Pattern.compile("s3://(\\S+):(\\S+)@(\\S+)");
    Matcher m = p.matcher(output);
    boolean result = m.find();
    String accessKey;
    String secretKey;
    String stringURL = output;
    if (result) {
        // Credentials were embedded in the URL; strip them out of the target.
        accessKey = m.group(1);
        secretKey = m.group(2);
        stringURL = "s3://" + m.group(3);
    } else {
        // get the access/secret key from the .seqware/settings file
        try {
            HashMap<String, String> settings = (HashMap<String, String>) ConfigTools.getSettings();
            accessKey = settings.get(SqwKeys.AWS_ACCESS_KEY.getSettingKey());
            secretKey = settings.get(SqwKeys.AWS_SECRET_KEY.getSettingKey());
        } catch (Exception e) {
            Log.error(e.getMessage());
            return false;
        }
    }
    if (accessKey == null || secretKey == null) {
        Log.error("Couldn't find access or secret key for S3 output so will exit!");
        return false;
    }
    // parse out the bucket and key
    p = Pattern.compile("s3://([^/]+)/*(\\S*)");
    m = p.matcher(stringURL);
    result = m.find();
    if (result) {
        String bucket = m.group(1);
        String key = m.group(2);
        if (key == null) {
            key = "";
        }
        // fileName is an instance field; append it unless the caller asked for the exact path.
        if (key.endsWith("/")) {
            // then add fileName to the target
            key = key + fileName;
        } else if (!key.endsWith(fileName) && !fullOutputPath) {
            // then add a / then fileName to the target
            key = key + "/" + fileName;
        }
        ObjectMetadata omd = new ObjectMetadata();
        // this is the size of what's being read
        omd.setContentLength(this.inputSize);
        // just encrypt everything via Server-Side encryption, see
        // http://docs.amazonwebservices.com/AmazonS3/latest/dev/SSEUsingJavaSDK.html
        omd.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        BasicAWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration config = new ClientConfiguration();
        config.setConnectionTimeout(connectionTimeout);
        config.setMaxConnections(maxConnections);
        config.setMaxErrorRetry(maxErrorRetry);
        config.setProtocol(Protocol.HTTPS);
        config.setSocketTimeout(socketTimeout);
        AmazonS3Client s3 = new AmazonS3Client(credentials, config);
        TransferManager tm = new TransferManager(s3);
        // if reading from a local file and not decrypting or encrypting then we can use the API call below that works on a file
        if (this.inputFile != null && decryptCipher == null && encryptCipher == null) {
            // just go ahead and close this, won't use it
            try {
                reader.close();
            } catch (IOException e1) {
                Log.error(e1.getMessage());
            }
            Log.info("S3 WRITES: BUCKET: " + bucket + " KEY: " + key + " INPUT FILE: " + inputFile);
            Upload upload = tm.upload(bucket, key, this.inputFile);
            boolean uploadStatus = (waitForS3Upload(upload));
            if (!uploadStatus) {
                Log.error("The S3 upload returned false!");
                tm.shutdownNow();
                return (false);
            }
            // now that the copy is complete, make sure the file out and the input size are equal
            try {
                ObjectMetadata om = s3.getObjectMetadata(bucket, key);
                if (this.inputSize != om.getContentLength()) {
                    Log.error("The S3 output file size of " + om.getContentLength() + " and the input file size of " + this.inputSize + " do not match so the file provisioning failed!");
                    tm.shutdownNow();
                    return (false);
                }
            } catch (Exception e) {
                // Size check is best-effort: a metadata failure logs but does not fail the upload.
                Log.error("Can't get metadata on key: " + key + " bucket: " + bucket);
            }
        } else {
            // add decryption to the reader
            if (decryptCipher != null) {
                reader = new CipherInputStream(reader, decryptCipher);
            }
            // add encryption to the output stream
            if (encryptCipher != null) {
                reader = new CipherInputStream(reader, encryptCipher);
            }
            // trigger the upload
            // NOTE(review): omd still carries the original inputSize as Content-Length;
            // with ciphers applied the actual stream length may differ — confirm upstream.
            Transfer myUpload = tm.upload(bucket, key, reader, omd);
            boolean uploadStatus = waitForS3Upload(myUpload);
            if (!uploadStatus) {
                tm.shutdownNow();
                Log.error("S3 Upload failed:" + myUpload);
                return (false);
            }
        }
        // need to shut down the transfer manager
        tm.shutdownNow();
    // this is how to do it without multipart, not usable for large files!
    // s3.putObject(bucket, key, reader, new ObjectMetadata());
    } else {
        Log.error("Unable to parse a bucket and file name from " + stringURL + " it should be in the form s3://<bucket>/<key>/ or s3://<bucket>/");
        return false;
    }
    return true;
}
Example 20
Project: cloudExplorer-master  File: BucketClass.java View source code
/**
 * Aborts all multipart uploads in {@code bucket} that were initiated more than
 * roughly one month ago, resolving the correct regional endpoint first.
 *
 * @param access_key AWS access key id
 * @param secret_key AWS secret key
 * @param bucket     bucket whose stale multipart uploads should be aborted
 * @param endpoint   S3 endpoint; non-AWS endpoints are used as-is with path-style access
 * @return a human-readable status message (never null)
 */
String abortMPUploads(String access_key, String secret_key, String bucket, String endpoint) {
    String message = null;
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials, new ClientConfiguration());
    TransferManager tm = null;
    try {
        if (endpoint.contains("amazonaws.com")) {
            // Map the bucket's location constraint onto the matching regional endpoint.
            String aws_endpoint = s3Client.getBucketLocation(new GetBucketLocationRequest(bucket));
            if (aws_endpoint.contains("US")) {
                s3Client.setEndpoint("https://s3.amazonaws.com");
            } else if (aws_endpoint.contains("us-west")) {
                s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
            } else if (aws_endpoint.contains("eu-west")) {
                s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
            } else if (aws_endpoint.contains("ap-")) {
                s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
            } else if (aws_endpoint.contains("sa-east-1")) {
                s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
            } else {
                s3Client.setEndpoint("https://s3." + aws_endpoint + ".amazonaws.com");
            }
        } else {
            // Non-AWS (S3-compatible) endpoint: force path-style addressing.
            s3Client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
            s3Client.setEndpoint(endpoint);
        }
        tm = new TransferManager(s3Client);
        // FIX: 1000 * 60 * 60 * 24 * 30 = 2,592,000,000 overflows int
        // (Integer.MAX_VALUE is 2,147,483,647), yielding a negative offset and a
        // cutoff date in the FUTURE. Compute in long arithmetic, and name the
        // variable for what it actually is (a month, not a week).
        long oneMonthMillis = 1000L * 60 * 60 * 24 * 30;
        Date oneMonthAgo = new Date(System.currentTimeMillis() - oneMonthMillis);
        tm.abortMultipartUploads(bucket, oneMonthAgo);
        message = ("\nSent request to delete all the multi-part uploads in the past month");
    } catch (AmazonServiceException multipart) {
        if (NewJFrame.gui) {
            mainFrame.jTextArea1.append("\n\nError Message:    " + multipart.getMessage());
            mainFrame.jTextArea1.append("\nHTTP Status Code: " + multipart.getStatusCode());
            mainFrame.jTextArea1.append("\nAWS Error Code:   " + multipart.getErrorCode());
            mainFrame.jTextArea1.append("\nError Type:       " + multipart.getErrorType());
            mainFrame.jTextArea1.append("\nRequest ID:       " + multipart.getRequestId());
            calibrate();
        } else {
            System.out.print("\n\nError Message:    " + multipart.getMessage());
            System.out.print("\nHTTP Status Code: " + multipart.getStatusCode());
            System.out.print("\nAWS Error Code:   " + multipart.getErrorCode());
            System.out.print("\nError Type:       " + multipart.getErrorType());
            System.out.print("\nRequest ID:       " + multipart.getRequestId());
        }
    } finally {
        // FIX: release the TransferManager's internal thread pool; 'false' leaves
        // the S3 client itself untouched.
        if (tm != null) {
            tm.shutdownNow(false);
        }
    }
    if (message == null) {
        message = "Failed to list multi-part uploads.";
    }
    return message;
}
Example 21
Project: cloudpier-adapters-master  File: BeanstalkFirstDeploymentNoGUI.java View source code
public boolean deploy(String war, String AWSKeyId, String AWSSecretKey, String applicationname, String applicationversion, String environment, String bucket, String host) throws BeanstalkAdapterException {
    boolean ret = false;
    // One shared TransferManager is enough for the whole application; it keeps
    // its own internal thread pool.
    BasicAWSCredentials awsCredentials = new BasicAWSCredentials(AWSKeyId, AWSSecretKey);
    tx = new TransferManager(awsCredentials);
    // Stash the deployment parameters in instance state for the later steps.
    accessKeyId = AWSKeyId;
    secretAccessKey = AWSSecretKey;
    appname = applicationname;
    appversion = applicationversion;
    bucketName = bucket;
    environment_name = environment;
    host_name = host;
    war_name_on_s3 = war;
    // Runs the three deployment steps: upload the WAR, create the application
    // version, then deploy it to the environment.
    actionPerformed(war_name_on_s3, accessKeyId, secretAccessKey, appname, appversion, environment_name, bucketName, host_name);
    // NOTE: 'ret' is never reassigned, so callers always receive false.
    return ret;
}
Example 22
Project: cloudpier-core-master  File: BeanstalkFirstDeployment.java View source code
public boolean deploy(String war, String AWSKeyId, String AWSSecretKey, String applicationname, String applicationversion, String environment, String bucket, String host) {
    boolean ret = false;
    // A single TransferManager instance (with its own internal thread pool) is
    // shared through the 'tx' field.
    BasicAWSCredentials awsCredentials = new BasicAWSCredentials(AWSKeyId, AWSSecretKey);
    tx = new TransferManager(awsCredentials);
    // Record the deployment parameters on the instance for later use.
    accessKeyId = AWSKeyId;
    secretAccessKey = AWSSecretKey;
    appname = applicationname;
    appversion = applicationversion;
    bucketName = bucket;
    environment_name = environment;
    host_name = host;
    // NOTE: 'ret' stays false; this method always reports false to callers.
    return ret;
}
Example 23
Project: cloudstack-master  File: S3Utils.java View source code
/**
 * Returns a cached TransferManager for the given client options, building and
 * caching a new one (keyed by access key) on first use.
 *
 * @param clientOptions connection options; only non-null fields override SDK defaults
 * @return a TransferManager bound to an S3 client configured from {@code clientOptions}
 */
public static TransferManager getTransferManager(final ClientOptions clientOptions) {
    // FIX: single get() instead of containsKey()+get() — avoids an NPE if another
    // thread evicts the entry between the two calls, and one map lookup less.
    final TransferManager cached = TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
    if (cached != null) {
        return cached;
    }
    final AWSCredentials basicAWSCredentials = new BasicAWSCredentials(clientOptions.getAccessKey(), clientOptions.getSecretKey());
    final ClientConfiguration configuration = new ClientConfiguration();
    // Every option is optional; only override the SDK default when the caller set it.
    if (clientOptions.isHttps() != null) {
        configuration.setProtocol(clientOptions.isHttps() ? HTTPS : HTTP);
    }
    if (clientOptions.getConnectionTimeout() != null) {
        configuration.setConnectionTimeout(clientOptions.getConnectionTimeout());
    }
    if (clientOptions.getMaxErrorRetry() != null) {
        configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
    }
    if (clientOptions.getSocketTimeout() != null) {
        configuration.setSocketTimeout(clientOptions.getSocketTimeout());
    }
    if (clientOptions.getUseTCPKeepAlive() != null) {
        configuration.setUseTcpKeepAlive(clientOptions.getUseTCPKeepAlive());
    }
    if (clientOptions.getConnectionTtl() != null) {
        configuration.setConnectionTTL(clientOptions.getConnectionTtl());
    }
    if (clientOptions.getSigner() != null) {
        configuration.setSignerOverride(clientOptions.getSigner());
    }
    LOGGER.debug(format("Creating S3 client with configuration: [protocol: %1$s, signer: %2$s, connectionTimeOut: %3$s, maxErrorRetry: %4$s, socketTimeout: %5$s, useTCPKeepAlive: %6$s, connectionTtl: %7$s]", configuration.getProtocol(), configuration.getSignerOverride(), configuration.getConnectionTimeout(), configuration.getMaxErrorRetry(), configuration.getSocketTimeout(), clientOptions.getUseTCPKeepAlive(), clientOptions.getConnectionTtl()));
    final AmazonS3Client client = new AmazonS3Client(basicAWSCredentials, configuration);
    if (isNotBlank(clientOptions.getEndPoint())) {
        LOGGER.debug(format("Setting the end point for S3 client with access key %1$s to %2$s.", clientOptions.getAccessKey(), clientOptions.getEndPoint()));
        client.setEndpoint(clientOptions.getEndPoint());
    }
    // FIX: return the instance we just created rather than re-reading the map.
    // NOTE(review): the check-then-put is still not atomic; two racing threads can
    // each build a TransferManager and one ends up overwritten in the cache.
    final TransferManager transferManager = new TransferManager(client);
    TRANSFERMANAGER_ACCESSKEY_MAP.put(clientOptions.getAccessKey(), transferManager);
    return transferManager;
}
Example 24
Project: dataservices-sdk-java-master  File: BasicS3Test.java View source code
@Test
public void testMultipartUpload() throws Exception {
    String key = "multipartKey";
    // write large file (must be a file to support parallel uploads)
    File tmpFile = File.createTempFile("random", "bin");
    tmpFile.deleteOnExit();
    // 31M (not a power of 2) so the final part has a different size than the others
    int objectSize = 31 * 1000 * 1000;
    copyStream(new RandomInputStream(objectSize), new FileOutputStream(tmpFile));
    assertEquals("tmp file is not the right size", objectSize, tmpFile.length());
    ThreadPoolExecutor executor = new ThreadPoolExecutor(10, 10, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(50));
    TransferManager tm = new TransferManager(s3, executor);
    try {
        PutObjectRequest request = new PutObjectRequest(getTestBucket(), key, tmpFile);
        request.setMetadata(new ObjectMetadata());
        request.getMetadata().addUserMetadata("selector", "one");
        Upload upload = tm.upload(request);
        upload.waitForCompletion();
        // Read the object back and verify its size round-trips.
        S3Object object = s3.getObject(getTestBucket(), key);
        int size = copyStream(object.getObjectContent(), null);
        assertEquals("Wrong object size", objectSize, size);
    } finally {
        // FIX: shut down the TransferManager so each test run doesn't leak the
        // 10-thread pool it was handed; 'false' keeps the shared 's3' client alive.
        tm.shutdownNow(false);
    }
    s3.deleteObject(getTestBucket(), key);
}
Example 25
Project: elasticsearch-lambda-master  File: S3SnapshotTransport.java View source code
@Override
protected void init() {
    // Every uploaded file is tagged for SSE-S3 (AES-256) encryption and gets an
    // explicit content length taken from the file on disk.
    objectMetadataProvider = new ObjectMetadataProvider() {

        @Override
        public void provideObjectMetadata(File file, ObjectMetadata metadata) {
            metadata.setContentLength(file.length());
            metadata.setSSEAlgorithm("AES256");
        }
    };
    // One TransferManager (with the default executor) is shared for all transfers.
    tx = new TransferManager(getS3Client(), createDefaultExecutorService());
}
Example 26
Project: medusa-glacier-master  File: S3TransferProgressSample.java View source code
public static void main(String[] args) throws Exception {
    /*
     * Credentials come from the [default] profile in ~/.aws/credentials.
     * TransferManager owns a thread pool, so a single shared instance ('tx')
     * is created here and reused throughout the application.
     */
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e);
    }
    AmazonS3 s3 = new AmazonS3Client(credentials);
    // Pin the client to us-west-2.
    s3.setRegion(Region.getRegion(Regions.US_WEST_2));
    tx = new TransferManager(s3);
    // Derive a lower-case, per-account bucket name from the access key id.
    String lowerKeyId = StringUtils.lowerCase(credentials.getAWSAccessKeyId());
    bucketName = "s3-upload-sdk-sample-" + lowerKeyId;
    new S3TransferProgressSample();
}
Example 27
Project: event-collector-master  File: TestS3Combine.java View source code
@BeforeClass
@Parameters({ "aws-credentials-file", "aws-test-bucket" })
public void setUpClass(String awsCredentialsFile, String awsTestBucket) throws Exception {
    // The credentials file is a flat JSON map: {"access-id": ..., "private-key": ...}.
    String credentialsJson = Files.toString(new File(awsCredentialsFile), Charsets.UTF_8);
    Map<String, String> map = JsonCodec.mapJsonCodec(String.class, String.class).fromJson(credentialsJson);
    AWSCredentials awsCredentials = new BasicAWSCredentials(map.get("access-id"), map.get("private-key"));
    service = new AmazonS3Client(awsCredentials);
    transferManager = new TransferManager(awsCredentials);
    // Create the test bucket up front if it doesn't exist yet.
    testBucket = awsTestBucket;
    if (!service.doesBucketExist(testBucket)) {
        service.createBucket(testBucket);
    }
}
Example 28
Project: opencast-master  File: AwsS3DistributionServiceImpl.java View source code
// Reads the AWS S3 distribution configuration from the component context,
// builds the S3 client and TransferManager, and ensures the bucket exists.
public void activate(ComponentContext cc) {
    // Get the configuration
    if (cc != null) {
        if (!Boolean.valueOf(getAWSConfigKey(cc, AWS_S3_DISTRIBUTION_ENABLE))) {
            logger.info("AWS S3 distribution disabled");
            return;
        }
        // AWS S3 bucket name
        bucketName = getAWSConfigKey(cc, AWS_S3_BUCKET_CONFIG);
        logger.info("AWS S3 bucket name is {}", bucketName);
        // AWS region
        String regionStr = getAWSConfigKey(cc, AWS_S3_REGION_CONFIG);
        logger.info("AWS region is {}", regionStr);
        opencastDistributionUrl = getAWSConfigKey(cc, AWS_S3_DISTRIBUTION_BASE_CONFIG);
        // Normalize the base URL so it always ends with a slash.
        if (!opencastDistributionUrl.endsWith("/")) {
            opencastDistributionUrl = opencastDistributionUrl + "/";
        }
        logger.info("AWS distribution url is {}", opencastDistributionUrl);
        String accessKeyId = getAWSConfigKey(cc, AWS_S3_ACCESS_KEY_ID_CONFIG);
        String accessKeySecret = getAWSConfigKey(cc, AWS_S3_SECRET_ACCESS_KEY_CONFIG);
        // Create AWS client.
        // NOTE: despite what earlier comments claimed, this does NOT use the default
        // credentials provider chain — it wraps the explicitly configured access key
        // id / secret in a static credentials provider.
        BasicAWSCredentials awsCreds = new BasicAWSCredentials(accessKeyId, accessKeySecret);
        s3 = AmazonS3ClientBuilder.standard().withRegion(regionStr).withCredentials(new AWSStaticCredentialsProvider(awsCreds)).build();
        s3TransferManager = new TransferManager(s3);
        // Create AWS S3 bucket if not there yet
        createAWSBucket();
        this.distributionChannel = OsgiUtil.getComponentContextProperty(cc, CONFIG_KEY_STORE_TYPE);
        logger.info("AwsS3DistributionService activated!");
    }
}
Example 29
Project: eoulsan-master  File: S3DataProtocol.java View source code
//
// Other methods
//
/**
   * Lazily builds and caches the AmazonS3 client (and its TransferManager)
   * from the AWS keys in the Eoulsan settings.
   * @return the shared AmazonS3 instance
   */
private AmazonS3 getS3() {
    // Guard clause: reuse the cached client on every call after the first.
    if (this.s3 != null) {
        return this.s3;
    }
    final Settings settings = EoulsanRuntime.getSettings();
    this.s3 = new AmazonS3Client(new BasicAWSCredentials(settings.getAWSAccessKey(), settings.getAWSSecretKey()));
    getLogger().info("AWS S3 account owner: " + this.s3.getS3AccountOwner());
    this.tx = new TransferManager(this.s3);
    return this.s3;
}
Example 30
Project: openbd-core-master  File: BackgroundUploader.java View source code
/**
 * Uploads one queued job file to S3, invoking the callback CFC on success or
 * failure and re-queueing the job (with backoff) until its retry budget is spent.
 *
 * @param jobFile job descriptor; expected keys include "localpath", "bucket",
 *                "key", "storage", "amazonkey", "retry", "attempt", "retryms",
 *                "deletefile", and optionally "metadata", "acl", "aes256key",
 *                "customheaders" — assumed from the reads below; confirm against callers
 */
private void uploadFile(Map<String, Object> jobFile) {
    File localFile = new File((String) jobFile.get("localpath"));
    // The file may have been removed between queueing and upload; give up cleanly.
    if (!localFile.isFile()) {
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }
    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));
    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);
        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"), (String) jobFile.get("key"), localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));
        // Optional request attributes: canned ACL, customer-supplied AES-256 key,
        // and arbitrary custom request headers.
        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));
        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));
        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map) jobFile.get("customheaders");
            Iterator<String> it = customheaders.keySet().iterator();
            while (it.hasNext()) {
                String k = it.next();
                por.putCustomRequestHeader(k, customheaders.get(k));
            }
        }
        long startTime = System.currentTimeMillis();
        // Blocking upload; TransferManager handles multipart internally.
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();
        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));
        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);
        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();
    } catch (Exception e) {
        // Failure path: report, then either drop the job (retry budget exhausted)
        // or re-queue it with the next attempt scheduled after "retryms".
        log(jobFile, "Failed=" + e.getMessage());
        callbackCfc(jobFile, false, e.getMessage());
        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;
        if (retry == attempt) {
            removeJobFile(jobFile);
        } else {
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }
        // Best-effort cleanup of any part files the failed multipart upload left behind.
        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));
    } finally {
        // Always release the TransferManager's thread pool (true also shuts the client).
        if (tm != null)
            tm.shutdownNow(true);
    }
}
Example 31
Project: weblounge-master  File: S3DeployMojo.java View source code
/**
   * 
   * {@inheritDoc}
   * 
   * @see org.apache.maven.plugin.Mojo#execute()
   */
public void execute() throws MojoExecutionException, MojoFailureException {
    // Setup AWS S3 client
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    AmazonS3Client uploadClient = new AmazonS3Client(credentials);
    TransferManager transfers = new TransferManager(credentials);
    // Normalize the key prefix: no leading slash, exactly one trailing slash.
    if (keyPrefix.startsWith("/"))
        keyPrefix = keyPrefix.substring(1);
    if (!keyPrefix.endsWith("/"))
        keyPrefix = keyPrefix + "/";
    // Keep track of how much data has been transferred
    long totalBytesTransferred = 0L;
    int items = 0;
    Queue<Upload> uploads = new LinkedBlockingQueue<Upload>();
    try {
        // Check if S3 bucket exists
        getLog().debug("Checking whether bucket " + bucket + " exists");
        if (!uploadClient.doesBucketExist(bucket)) {
            getLog().error("Desired bucket '" + bucket + "' does not exist!");
            return;
        }
        getLog().debug("Collecting files to transfer from " + resources.getDirectory());
        List<File> res = getResources();
        for (File file : res) {
            // Make path of resource relative to resources directory
            String filename = file.getName();
            String extension = FilenameUtils.getExtension(filename);
            String path = file.getPath().substring(resources.getDirectory().length());
            String key = concat("/", keyPrefix, path).substring(1);
            // Delete old file version in bucket
            getLog().debug("Removing existing object at " + key);
            uploadClient.deleteObject(bucket, key);
            // Setup meta data
            // 'valid' is in hours; Cache-Control max-age wants seconds.
            ObjectMetadata meta = new ObjectMetadata();
            meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600));
            FileInputStream fis = null;
            GZIPOutputStream gzipos = null;
            final File fileToUpload;
            // JS and CSS assets are optionally gzip-compressed into a temp file
            // before upload, with Content-Encoding/Content-Type set to match.
            if (gzip && ("js".equals(extension) || "css".equals(extension))) {
                try {
                    fis = new FileInputStream(file);
                    File gzFile = File.createTempFile(file.getName(), null);
                    gzipos = new GZIPOutputStream(new FileOutputStream(gzFile));
                    IOUtils.copy(fis, gzipos);
                    fileToUpload = gzFile;
                    meta.setContentEncoding("gzip");
                    if ("js".equals(extension))
                        meta.setContentType("text/javascript");
                    if ("css".equals(extension))
                        meta.setContentType("text/css");
                } catch (FileNotFoundException e) {
                    // Skip this resource but keep deploying the rest.
                    getLog().error(e);
                    continue;
                } catch (IOException e) {
                    getLog().error(e);
                    continue;
                } finally {
                    IOUtils.closeQuietly(fis);
                    IOUtils.closeQuietly(gzipos);
                }
            } else {
                fileToUpload = file;
            }
            // Do a random check for existing errors before starting the next upload
            // ('erroneousUpload' is set asynchronously by the progress listeners).
            if (erroneousUpload != null)
                break;
            // Create put object request
            long bytesToTransfer = fileToUpload.length();
            totalBytesTransferred += bytesToTransfer;
            PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload);
            request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer));
            request.setMetadata(meta);
            // Schedule put object request (asynchronous; completion is awaited below)
            getLog().info("Uploading " + key + " (" + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")");
            Upload upload = transfers.upload(request);
            uploads.add(upload);
            items++;
        }
    } catch (AmazonServiceException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    } catch (AmazonClientException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    }
    // Wait for uploads to be finished
    String currentUpload = null;
    try {
        Thread.sleep(1000);
        getLog().info("Waiting for " + uploads.size() + " uploads to finish...");
        while (!uploads.isEmpty()) {
            Upload upload = uploads.poll();
            // Derive a readable name from the SDK's "Uploading to <bucket/key>" description.
            currentUpload = upload.getDescription().substring("Uploading to ".length());
            if (TransferState.InProgress.equals(upload.getState()))
                getLog().debug("Waiting for upload " + currentUpload + " to finish");
            upload.waitForUploadResult();
        }
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (InterruptedException e) {
        getLog().debug("Interrupted while waiting for upload to finish");
    }
    // Check for errors that happened outside of the actual uploading
    if (erroneousUpload != null) {
        throw new MojoExecutionException("Error while uploading " + erroneousUpload);
    }
    getLog().info("Deployed " + items + " files (" + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket);
}
Example 32
Project: liferay-portal-master  File: S3Store.java View source code
/**
 * Builds a TransferManager around the given S3 client, backed by a dedicated
 * executor and tuned with the store's configured minimum part size and
 * multipart-upload threshold.
 *
 * @param amazonS3 the S3 client to wrap
 * @return a configured TransferManager
 */
protected TransferManager getTransferManager(AmazonS3 amazonS3) {
    // NOTE(review): presumably Liferay's own ThreadPoolExecutor — the
    // java.util.concurrent one has no (corePoolSize, maxPoolSize) constructor;
    // confirm the import at the top of this file.
    ExecutorService executorService = new ThreadPoolExecutor(_s3StoreConfiguration.corePoolSize(), _s3StoreConfiguration.maxPoolSize());
    // Third argument 'false' — presumably shutDownThreadPools; confirm against
    // the AWS SDK version in use.
    TransferManager transferManager = new TransferManager(amazonS3, executorService, false);
    TransferManagerConfiguration transferManagerConfiguration = new TransferManagerConfiguration();
    transferManagerConfiguration.setMinimumUploadPartSize(_s3StoreConfiguration.minimumUploadPartSize());
    transferManagerConfiguration.setMultipartUploadThreshold(_s3StoreConfiguration.multipartUploadThreshold());
    transferManager.setConfiguration(transferManagerConfiguration);
    return transferManager;
}
Example 33
Project: ecs-sync-master  File: AwsS3Storage.java View source code
/**
 * Writes a single object (directory marker, file, or raw stream) to the target
 * bucket under {@code targetKey}, letting the AWS TransferManager decide
 * whether a multipart upload is required based on the configured threshold.
 */
@Override
void putObject(SyncObject obj, String targetKey) {
    // Carry source metadata over when configured; otherwise start from scratch.
    ObjectMetadata om;
    if (options.isSyncMetadata())
        om = s3MetaFromSyncMeta(obj.getMetadata());
    else
        om = new ObjectMetadata();
    if (obj.getMetadata().isDirectory())
        om.setContentType(TYPE_DIRECTORY);
    PutObjectRequest req;
    File file = (File) obj.getProperty(AbstractFilesystemStorage.PROP_FILE);
    S3ProgressListener progressListener = null;
    if (obj.getMetadata().isDirectory()) {
        // Directories become zero-byte marker objects.
        req = new PutObjectRequest(config.getBucketName(), targetKey, new ByteArrayInputStream(new byte[0]), om);
    } else if (file != null) {
        // File-backed source: upload directly from disk with progress reporting.
        req = new PutObjectRequest(config.getBucketName(), targetKey, file).withMetadata(om);
        progressListener = new ByteTransferListener(obj);
    } else {
        // Stream-backed source, optionally wrapped for performance monitoring.
        InputStream stream = obj.getDataStream();
        if (options.isMonitorPerformance())
            stream = new ProgressInputStream(stream, new PerformanceListener(getWriteWindow()));
        req = new PutObjectRequest(config.getBucketName(), targetKey, stream, om);
    }
    if (options.isSyncAcl())
        req.setAccessControlList(s3AclFromSyncAcl(obj.getAcl(), options.isIgnoreInvalidAcls()));
    // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
    // and abort if it fails
    TransferManagerConfiguration xferConfig = new TransferManagerConfiguration();
    xferConfig.setMultipartUploadThreshold((long) config.getMpuThresholdMb() * 1024 * 1024);
    xferConfig.setMinimumUploadPartSize((long) config.getMpuPartSizeMb() * 1024 * 1024);
    TransferManager xferManager = new TransferManager(s3, Executors.newFixedThreadPool(config.getMpuThreadCount()));
    xferManager.setConfiguration(xferConfig);
    try {
        // directly update
        final Upload upload = xferManager.upload(req, progressListener);
        String eTag = time(new Callable<String>() {

            @Override
            public String call() throws Exception {
                return upload.waitForUploadResult().getETag();
            }
        }, OPERATION_MPU);
        log.debug("Wrote {}, etag: {}", targetKey, eTag);
    } catch (Exception e) {
        // FIX: restore the interrupt status so callers can observe the interruption.
        if (e instanceof InterruptedException)
            Thread.currentThread().interrupt();
        if (e instanceof RuntimeException)
            throw (RuntimeException) e;
        throw new RuntimeException("upload thread was interrupted", e);
    } finally {
        // FIX: a TransferManager (and its fixed thread pool) was created per call
        // and never shut down, leaking threads on every upload. Shut it down here
        // but keep the shared AmazonS3 client alive (shutdownNow(false)).
        xferManager.shutdownNow(false);
    }
}
Example 34
Project: hadoop-master  File: S3AFileSystem.java View source code
/** Called after a new FileSystem instance is constructed.
   * Resolves credentials, builds the AWS client configuration (including
   * optional proxy settings), creates the S3 client and the shared
   * TransferManager, and validates that the target bucket exists.
   * @param name a uri whose authority section names the host, port, etc.
   *   for this FileSystem
   * @param conf the configuration
   * @throws IOException if the bucket named by the URI does not exist
   */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);
    // Rebuild the URI from scheme + authority only (drops path and user-info).
    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri, this.getWorkingDirectory());
    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(ACCESS_KEY, null);
    String secretKey = conf.get(SECRET_KEY, null);
    // Credentials embedded in the URI ("access:secret@bucket") override the config.
    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            // No colon: the whole user-info string is the access key only.
            accessKey = userInfo;
        }
    }
    // Provider chain: explicit keys, then EC2 instance profile, then anonymous.
    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(), new AnonymousAWSCredentialsProvider());
    bucket = name.getHost();
    // HTTP client tuning: connection pool size, protocol, retries and timeouts.
    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
    awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
    awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));
    // Optional request-signing algorithm override.
    String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
    if (!signerOverride.isEmpty()) {
        awsConf.setSignerOverride(signerOverride);
    }
    // Optional proxy configuration; host and port must be consistent.
    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
    int proxyPort = conf.getInt(PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        awsConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            awsConf.setProxyPort(proxyPort);
        } else {
            // No port configured: fall back to the protocol's well-known default.
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                awsConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                awsConf.setProxyPort(80);
            }
        }
        // Proxy username and password must be set together or not at all.
        String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
        String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        awsConf.setProxyUsername(proxyUsername);
        awsConf.setProxyPassword(proxyPassword);
        awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
        awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug("Using proxy server {}:{} as user {} with password {} on " + "domain {} as workstation {}", awsConf.getProxyHost(), awsConf.getProxyPort(), String.valueOf(awsConf.getProxyUsername()), awsConf.getProxyPassword(), awsConf.getProxyDomain(), awsConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        // A proxy port without a proxy host is a configuration error.
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }
    s3 = new AmazonS3Client(credentials, awsConf);
    // Optional non-default endpoint (e.g. a region-specific or private endpoint).
    String endPoint = conf.getTrimmed(ENDPOINT, "");
    if (!endPoint.isEmpty()) {
        try {
            s3.setEndpoint(endPoint);
        } catch (IllegalArgumentException e) {
            String msg = "Incorrect endpoint: " + e.getMessage();
            LOG.error(msg);
            throw new IllegalArgumentException(msg, e);
        }
    }
    maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    // Multipart sizes are clamped up to S3's 5 MB minimum part size.
    partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = conf.getLong(MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
    if (partSize < 5 * 1024 * 1024) {
        LOG.error(MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }
    if (multiPartThreshold < 5 * 1024 * 1024) {
        LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        multiPartThreshold = 5 * 1024 * 1024;
    }
    // Shared transfer thread pool; a 0 setting means "scale with CPU count".
    int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
    int coreThreads = conf.getInt(CORE_THREADS, DEFAULT_CORE_THREADS);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong(KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    // Bounded work queue so task submission backs up instead of growing unbounded.
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(maxThreads * conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
    threadPoolExecutor = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, newDaemonThreadFactory("s3a-transfer-shared-"));
    threadPoolExecutor.allowCoreThreadTimeOut(true);
    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
    transfers = new TransferManager(s3, threadPoolExecutor);
    transfers.setConfiguration(transferConfiguration);
    // Optional canned ACL applied to objects written by this filesystem.
    String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }
    // Fail fast if the bucket is missing rather than on first access.
    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }
    // Optionally clean up stale multipart uploads older than the configured age.
    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART);
    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
    if (purgeExistingMultipart) {
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);
        transfers.abortMultipartUploads(bucket, purgeBefore);
    }
    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM);
    setConf(conf);
}
Example 35
Project: HERD-master  File: S3DaoImpl.java View source code
/**
 * Aborts every in-progress multipart upload in the configured bucket that was
 * initiated before {@code thresholdDate}, paging through the full listing.
 *
 * @return the number of multipart uploads aborted
 */
@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    // The client is created per invocation and released in the finally block.
    AmazonS3Client s3Client = getAmazonS3(params);
    int abortedCount = 0;
    try {
        String nextUploadIdMarker = null;
        String nextKeyMarker = null;
        while (true) {
            // Build the listing request, resuming from the previous page's
            // markers (both null on the first iteration).
            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(params.getS3BucketName());
            listRequest.setUploadIdMarker(nextUploadIdMarker);
            listRequest.setKeyMarker(nextKeyMarker);
            MultipartUploadListing listing = s3Operations.listMultipartUploads(TransferManager.appendSingleObjectUserAgent(listRequest), s3Client);
            for (MultipartUpload multipartUpload : listing.getMultipartUploads()) {
                // Only uploads initiated strictly before the threshold are aborted.
                if (multipartUpload.getInitiated().compareTo(thresholdDate) < 0) {
                    s3Operations.abortMultipartUpload(TransferManager.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(params.getS3BucketName(), multipartUpload.getKey(), multipartUpload.getUploadId())), s3Client);
                    LOGGER.info("Aborted S3 multipart upload. s3Key=\"{}\" s3BucketName=\"{}\" s3MultipartUploadInitiatedDate=\"{}\"", multipartUpload.getKey(), params.getS3BucketName(), multipartUpload.getInitiated());
                    abortedCount++;
                }
            }
            // Stop when the listing is complete; otherwise advance the markers.
            if (!listing.isTruncated()) {
                break;
            }
            nextUploadIdMarker = listing.getNextUploadIdMarker();
            nextKeyMarker = listing.getNextKeyMarker();
        }
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        s3Client.shutdown();
    }
    return abortedCount;
}
Example 36
Project: hadoop-release-2.6.0-master  File: S3AFileSystem.java View source code
/** Builds the shared TransferManager, applying the same part-size and
 *  threshold tuning to both uploads and server-side copies. */
private void initTransferManager() {
    TransferManagerConfiguration tuning = new TransferManagerConfiguration();
    // Uploads and copies share the configured part size ...
    tuning.setMinimumUploadPartSize(partSize);
    tuning.setMultipartCopyPartSize(partSize);
    // ... and the configured multipart threshold.
    tuning.setMultipartUploadThreshold(multiPartThreshold);
    tuning.setMultipartCopyThreshold(multiPartThreshold);

    transfers = new TransferManager(s3, unboundedThreadPool);
    transfers.setConfiguration(tuning);
}
Example 37
Project: xmlsh-master  File: S3Client.java View source code
/** Returns the lazily-created shared TransferManager for this client. */
public TransferManager getTransferManager() {
    if (tm != null) {
        return tm;
    }
    // First use: build the manager on the SDK's default executor service.
    // NOTE(review): this lazy init is unsynchronized, as in the original —
    // confirm the client is only used from a single thread.
    tm = new TransferManager(mClient, createDefaultExecutorService());
    return tm;
}
Example 38
Project: beanstalker-master  File: BeanstalkerS3Client.java View source code
/** Wraps this client in a TransferManager with a lowered (100 KB)
 *  multipart threshold, then applies the requested region. */
protected void init(Region region) {
    // Lower the multipart threshold so even small artifacts use MPU.
    TransferManagerConfiguration transferConfig = new TransferManagerConfiguration();
    transferConfig.setMultipartUploadThreshold(100 * Constants.KB);

    transferManager = new TransferManager(this);
    transferManager.setConfiguration(transferConfig);

    this.setRegion(region);
}
Example 39
Project: deeplearning4j-master  File: S3Downloader.java View source code
/**
 * Starts an asynchronous download of every object under {@code keyPrefix}
 * into {@code folderPath}; the caller waits on the returned handle.
 * NOTE(review): the TransferManager created here is never shut down, so its
 * worker threads outlive the call — consider a shared, reusable instance.
 */
public MultipleFileDownload downloadFolder(String bucketName, String keyPrefix, File folderPath) {
    TransferManager transferManager = new TransferManager(getClient());
    return transferManager.downloadDirectory(bucketName, keyPrefix, folderPath);
}
Example 40
Project: NBS3Sync-master  File: S3FileManager.java View source code
/**
 * Downloads the entire configured bucket into the local base directory and
 * blocks until the transfer completes.
 */
protected void syncEntireBucket() {
    logger.info("Syncing entire bucket...");
    TransferManager manager = new TransferManager(config.getAWSCredentials());
    try {
        // FIX: downloadDirectory() is asynchronous — the original returned
        // immediately without waiting, so the "sync" could still be in flight
        // (or silently fail) after this method returned.
        MultipleFileDownload download = manager.downloadDirectory(config.getBucketName(), null, config.getBaseDir().toFile());
        download.waitForCompletion();
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can observe it.
        Thread.currentThread().interrupt();
        logger.info("Interrupted while waiting for bucket sync to finish");
    } finally {
        // FIX: release the TransferManager's worker threads; this also shuts
        // down the client it created internally from the supplied credentials.
        manager.shutdownNow();
    }
}
Example 41
Project: xmlsh1_3-master  File: AWSS3Command.java View source code
/** Lazily creates and caches the TransferManager used by this command. */
protected TransferManager getTransferManager() {
    // NOTE(review): unsynchronized lazy init, as in the original — safe only
    // when the command runs single-threaded.
    if (tm == null) {
        tm = new TransferManager(mAmazon, createDefaultExecutorService());
    }
    return tm;
}