Java Examples for com.amazonaws.auth.AWSCredentials

The following Java examples demonstrate the usage of com.amazonaws.auth.AWSCredentials. These source code samples are taken from a variety of open-source projects.

Example 1
Project: sequenceiq-samples-master  File: SimpleAutoScalingService.java View source code
@Override
public List<LaunchConfiguration> describeAmazonLaunchConfigurations(AWSCredentials credentials) {
    // Build an auto-scaling client for the caller's credentials and return
    // every launch configuration the account exposes.
    AmazonAutoScalingClient client = amazonAutoScalingClientFactory.createAmazonAutoScalingClient(credentials);
    return client.describeLaunchConfigurations().getLaunchConfigurations();
}
Example 2
Project: tapestry-aws-core-master  File: AWSCoreModule.java View source code
/**
 * Registers every supported AWS client with the Tapestry IoC container.
 * Each service interface is bound to a builder that constructs the concrete
 * client from the container-provided {@link AWSCredentials} service.
 */
public static void bind(ServiceBinder binder) {
    //binder.bind(AWSMailTransport.class,AWSMailTransportImpl.class);
    bindAwsClient(binder, AmazonS3.class, AmazonS3Client.class);
    bindAwsClient(binder, AmazonDynamoDB.class, AmazonDynamoDBClient.class);
    bindAwsClient(binder, AmazonEC2.class, AmazonEC2Client.class);
    bindAwsClient(binder, AmazonSimpleDB.class, AmazonSimpleDBClient.class);
    bindAwsClient(binder, AmazonSQS.class, AmazonSQSClient.class);
    bindAwsClient(binder, AmazonSNS.class, AmazonSNSClient.class);
    bindAwsClient(binder, AmazonRDS.class, AmazonRDSClient.class);
    bindAwsClient(binder, AmazonElasticMapReduce.class, AmazonElasticMapReduceClient.class);
    bindAwsClient(binder, AmazonSimpleEmailService.class, AmazonSimpleEmailServiceClient.class);
    bindAwsClient(binder, AmazonElasticLoadBalancing.class, AmazonElasticLoadBalancingClient.class);
    bindAwsClient(binder, AmazonCloudWatch.class, AmazonCloudWatchClient.class);
    bindAwsClient(binder, AmazonAutoScaling.class, AmazonAutoScalingClient.class);
    bindAwsClient(binder, AmazonIdentityManagement.class, AmazonIdentityManagementClient.class);
}

/**
 * Binds {@code service} to a builder that instantiates {@code clientClass}
 * via its {@code (AWSCredentials)} constructor, using the credentials
 * registered in the container. Replaces thirteen copy-pasted anonymous
 * ServiceBuilder classes with one reflective helper.
 *
 * @param binder      the Tapestry service binder
 * @param service     the AWS service interface to expose
 * @param clientClass the concrete client implementation; must declare a
 *                    public constructor taking a single AWSCredentials
 */
private static <T> void bindAwsClient(ServiceBinder binder, Class<T> service, final Class<? extends T> clientClass) {
    binder.bind(service, new ServiceBuilder<T>() {

        public T buildService(ServiceResources serviceResources) {
            try {
                return clientClass.getConstructor(AWSCredentials.class).newInstance(serviceResources.getService(AWSCredentials.class));
            } catch (Exception e) {
                // All SDK clients above satisfy the constructor contract, so
                // this only fires on a programming error in the bind list.
                throw new RuntimeException("Failed to construct AWS client " + clientClass.getName(), e);
            }
        }
    });
}
Example 3
Project: aws-snitch-master  File: SnitchModule.java View source code
@Override
public void configure(Binder binder) {
    // Loads AWS credentials and the raw configuration from an INI file and
    // binds both into the Guice injector. Registers a binder error (which
    // aborts injector creation) when the file is unparsable or keys are missing.
    try {
        HierarchicalINIConfiguration config = new HierarchicalINIConfiguration(configFile);
        String awsAccess = config.getString("aws.access");
        String awsSecret = config.getString("aws.secret");
        // Original code NPE'd when a key was absent (getString returns null)
        // and still bound the useless credentials after reporting the error;
        // now we null-check, report, and stop configuring.
        if (awsAccess == null || awsAccess.length() == 0 || awsSecret == null || awsSecret.length() == 0) {
            binder.addError("config file did not include aws access and/or secret keys!");
            return;
        }
        AWSCredentials creds = new BasicAWSCredentials(awsAccess, awsSecret);
        binder.bind(AWSCredentials.class).toInstance(creds);
        binder.bind(Configuration.class).toInstance(config);
    } catch (ConfigurationException e) {
        binder.addError("error parsing config file", e);
    }
}
Example 4
Project: DeployMan-master  File: Ec2.java View source code
@Override
public AmazonEC2 getClient() {
    // EC2 client pointed at the region configured in the user properties.
    AWSCredentials credentials = new Aws().getAwsCredentials();
    AmazonEC2 client = new AmazonEC2Client(credentials);
    //$NON-NLS-1$ //$NON-NLS-2$
    client.setEndpoint("https://ec2." + getUserProperty(AWS_REGION) + ".amazonaws.com");
    return client;
}
Example 5
Project: kagura-master  File: GroovyDataReportConnectorTest.java View source code
@Test
public void awsImportTest() {
    // Verifies that a Groovy report script can @Grab the AWS SDK at runtime
    // and construct an AWSCredentials instance; the script produces no rows.
    GroovyDataReportConnector groovyDataReportConnector = new GroovyDataReportConnector(new GroovyReportConfig() {

        {
            // Minimal report config: no columns, options or parameters — the
            // inline Groovy script is the only meaningful content.
            setColumns(new ArrayList<ColumnDef>());
            setExtraOptions(new HashMap<String, String>());
            setGroovy("@GrabResolver(name = 'mvnrepository', root = 'http://repo1.maven.org/maven2')\n" + "@Grapes(\n" + "        @Grab(group='com.amazonaws', module='aws-java-sdk', version='1.7.9')\n" + ")\n" + "import com.amazonaws.auth.AWSCredentials\n" + "import com.amazonaws.auth.BasicAWSCredentials\n" + "\n" + "AWSCredentials awsCredentials = new BasicAWSCredentials(\"\",\"\");\n" + "\n" + "");
            setParamConfig(new ArrayList<ParamConfig>());
            setReportId("test");
        }
    });
    // Running with no parameters; success means dependency resolution and
    // script compilation both worked.
    groovyDataReportConnector.run(new HashMap<String, Object>());
}
Example 6
Project: load_web_tests_based_on_cloud_computing-master  File: Instance_runner.java View source code
// Initializes the shared AWS state (EC2 client, image ids, describe/run
// requests) used elsewhere in this class. All names assigned below are
// class-level fields declared outside this method.
private static void initialize_aws_variables() throws Exception {
    // NOTE(review): the resource stream is handed straight to
    // PropertiesCredentials — confirm the SDK closes it, otherwise it leaks.
    AWSCredentials credentials = new PropertiesCredentials(Instance_runner.class.getResourceAsStream("../../AwsCredentials.properties"));
    ec2 = new AmazonEC2Client(credentials);
    // eu-west-1 (Ireland) regional endpoint.
    ec2.setEndpoint("ec2.eu-west-1.amazonaws.com");
    // Fixed AMI used for all launched instances.
    image_id = "ami-4090a634";
    image_ids = new ArrayList<String>();
    image_ids.add(image_id);
    describe_images_request = new DescribeImagesRequest();
    describe_images_request.setImageIds(image_ids);
    describe_images_result = ec2.describeImages(describe_images_request);
    images = describe_images_result.getImages();
    // Launch exactly one instance of the AMI (min = 1, max = 1).
    run_instances_request = new RunInstancesRequest(image_id, 1, 1);
}
Example 7
Project: rainbownlp-master  File: AmazonEMRManager.java View source code
public void runOnEMR(List<HadoopJarStepConfig> steps) {
    // Load credentials from the bundled properties file; exit hard if absent.
    AWSCredentials credentials = null;
    try {
        credentials = new PropertiesCredentials(AmazonEMRManager.class.getResourceAsStream("AwsCredentials.properties"));
    } catch (IOException e1) {
        System.out.println("Credentials were not properly entered into AwsCredentials.properties.");
        System.out.println(e1.getMessage());
        System.exit(-1);
    }
    AmazonElasticMapReduce client = new AmazonElasticMapReduceClient(credentials);
    // Name the steps "Step1", "Step2", ... in submission order.
    List<StepConfig> namedSteps = new ArrayList<StepConfig>();
    for (int i = 0; i < steps.size(); i++) {
        namedSteps.add(new StepConfig("Step" + (i + 1), steps.get(i)));
    }
    AddJobFlowStepsRequest request = new AddJobFlowStepsRequest().withJobFlowId("j-1HTE8WKS7SODR").withSteps(namedSteps);
    AddJobFlowStepsResult result = client.addJobFlowSteps(request);
    System.out.println(result.getStepIds());
}
Example 8
Project: vpc2vpc-master  File: VPCHelper.java View source code
/**
 * Lists the VPCs of every region visible to the given credentials, querying
 * regions concurrently on a fixed pool of 8 threads.
 *
 * @param awsCreds credentials used for the region and VPC queries
 * @return a map from each region to a (possibly still empty on error) list
 *         of its VPCs; the signature keeps the original raw {@code List}
 *         value type for caller compatibility
 */
public HashMap<Region, List> listRegionVpcs(AWSCredentials awsCreds) {
    AmazonEC2Client ec2Client = new AmazonEC2Client(awsCreds);
    List<Region> regions = new ArrayList<Region>();
    DescribeRegionsResult descRegionsResult = ec2Client.describeRegions();
    if (descRegionsResult != null) {
        regions = descRegionsResult.getRegions();
    }
    HashMap<Region, List> regionVpcs = new HashMap<Region, List>();
    ExecutorService listVPCExecutor = Executors.newFixedThreadPool(8);
    for (Region region : regions) {
        // Each worker fills the per-region list in place.
        List<Vpc> vpcs = new ArrayList<Vpc>();
        regionVpcs.put(region, vpcs);
        listVPCExecutor.execute(new ListVPCRunnable(awsCreds, region, vpcs));
    }
    listVPCExecutor.shutdown();
    try {
        listVPCExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        LOG.error("Caught InterruptedException: " + e.getMessage());
    }
    return regionVpcs;
}
Example 9
Project: xmlsh-master  File: AWSOptionsCredentialsProvider.java View source code
@Override
public AWSCredentials getCredentials() {
    // Explicit keys win; fall back to a named profile; otherwise fail.
    if (accessKey != null && secretAccessKey != null) {
        return sessionToken != null
                ? new BasicSessionCredentials(accessKey, secretAccessKey, sessionToken)
                : new BasicAWSCredentials(accessKey, secretAccessKey);
    }
    if (profileName != null) {
        return new ProfileCredentialsProvider(profileName).getCredentials();
    }
    throw new AmazonClientException("Unable to load AWS credentials from options " + "(accessKey and secretKey)");
}
Example 10
Project: aws-big-data-blog-master  File: EmrHelper.java View source code
public static void main(String[] args) {
    // Load credentials from the bundled properties file; exit hard if absent.
    AWSCredentials credentials = null;
    try {
        credentials = new PropertiesCredentials(EmrHelper.class.getResourceAsStream("AwsCredentials.properties"));
    } catch (IOException e1) {
        System.out.println("Credentials were not properly entered into AwsCredentials.properties.");
        System.out.println(e1.getMessage());
        System.exit(-1);
    }
    AmazonElasticMapReduce emr = new AmazonElasticMapReduceClient(credentials);
    // Predefined step (see StepFactory for the full list): install Hive.
    StepConfig installHive = new StepConfig("Hive", new StepFactory().newInstallHiveStep());
    // Custom jar step; main class and args are optional if the jar's
    // manifest already declares them.
    HadoopJarStepConfig customJar = new HadoopJarStepConfig()
            .withJar("s3://mybucket/my-jar-location1")
            .withMainClass("com.my.Main1")
            .withArgs("--verbose");
    StepConfig customStep = new StepConfig("Step1", customJar);
    AddJobFlowStepsRequest request = new AddJobFlowStepsRequest().withJobFlowId("j-1HTE8WKS7SODR").withSteps(installHive, customStep);
    AddJobFlowStepsResult result = emr.addJobFlowSteps(request);
    System.out.println(result.getStepIds());
}
Example 11
Project: clouck-master  File: IamWrapperImpl.java View source code
// Builds an IAM client for the given static key pair.
private AmazonIdentityManagement findClient(String accessKeyId, String secretAccessKey) {
    // TODO: need to config client config parameter. ignore it for now.
    AWSCredentials keyPair = new BasicAWSCredentials(accessKeyId, secretAccessKey);
    //        iam.setEndpoint(Region.toIamEndpoint());
    return new AmazonIdentityManagementClient(keyPair);
}
Example 12
Project: elasticsearch-master  File: AwsS3ServiceImplTests.java View source code
// Resolves the client settings named by the repository settings, builds
// credentials from them, and checks both halves of the expected key pair.
private void assertCredentials(Settings singleRepositorySettings, Settings settings, String expectedKey, String expectedSecret) {
    String clientName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings);
    S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, clientName);
    AWSCredentials creds = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, singleRepositorySettings).getCredentials();
    assertThat(creds.getAWSAccessKeyId(), is(expectedKey));
    assertThat(creds.getAWSSecretKey(), is(expectedSecret));
}
Example 13
Project: ignite-master  File: S3CheckpointSpiSelfTest.java View source code
/**
     * Empties the unit-test checkpoint bucket after the SPI stops.
     *
     * @throws Exception If error.
     */
@Override
protected void afterSpiStopped() throws Exception {
    AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), IgniteS3TestSuite.getSecretKey());
    AmazonS3 s3 = new AmazonS3Client(cred);
    String bucketName = S3CheckpointSpi.BUCKET_NAME_PREFIX + "unit-test-bucket";
    try {
        // Walk every page of the listing, deleting each object as we go.
        ObjectListing page = s3.listObjects(bucketName);
        for (;;) {
            for (S3ObjectSummary summary : page.getObjectSummaries()) {
                s3.deleteObject(bucketName, summary.getKey());
            }
            if (!page.isTruncated())
                break;
            page = s3.listNextBatchOfObjects(page);
        }
    } catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
}
Example 14
Project: Java-Examples-master  File: App.java View source code
/**
     * Main method for application: exercises the DynamoDB-backed user
     * repository end to end (save, read, query by email, delete), using
     * plain {@code assert} statements (run with {@code -ea}) as checks.
     */
//CHECKSTYLE.OFF: AbbreviationAsWordInName
public static void main(String[] args) throws Exception {
    // try-with-resources: the original leaked the classpath stream.
    final AWSCredentials propertiesCredentials;
    try (InputStream properties = Thread.currentThread().getContextClassLoader().getResourceAsStream("aws.properties")) {
        propertiesCredentials = new PropertiesCredentials(properties);
    }
    final AmazonDynamoDB amazonDynamoDb = new AmazonDynamoDBClient(propertiesCredentials);
    amazonDynamoDb.setRegion(Region.getRegion(Regions.US_WEST_2));
    final DynamoDBMapper dynamoDBMapper = new DynamoDBMapper(amazonDynamoDb);
    final UserRepository userRepository = new UserRepositoryImpl(dynamoDBMapper);
    // Save
    final User user = new User();
    user.setUsername("bhdrkn");
    user.setEmail("bhdrkn@gmail.com");
    user.setPassword("1q2w3e");
    userRepository.put(user);
    // Read
    final User readUser = userRepository.get("bhdrkn");
    assert user.equals(readUser);
    // Read All
    final List<User> users = userRepository.findAll();
    assert users.contains(user);
    // Find by email
    final List<User> findUsers = userRepository.findByEmail("bhdrkn@gmail.com");
    assert findUsers.contains(user);
    // Find by unknown email — expect no matches
    final List<User> notFound = userRepository.findByEmail("asdf");
    assert notFound.isEmpty();
    // Delete
    userRepository.delete("bhdrkn");
    final List<User> all = userRepository.findAll();
    assert all.isEmpty();
    System.out.println("Finished!");
}
Example 15
Project: micro-server-master  File: S3UploadSystemTest.java View source code
// Builds a TransferManager whose credentials are read lazily from the JVM
// system properties s3.accessKey / s3.secretKey on every request.
private static TransferManager createManager() {
    AWSCredentials systemPropertyCredentials = new AWSCredentials() {

        @Override
        public String getAWSAccessKeyId() {
            return System.getProperty("s3.accessKey");
        }

        @Override
        public String getAWSSecretKey() {
            return System.getProperty("s3.secretKey");
        }
    };
    return new TransferManager(systemPropertyCredentials);
}
Example 16
Project: monitor-event-tap-master  File: TestCloudWatchUpdater.java View source code
@BeforeClass(groups = "aws")
@Parameters("aws-credentials-file")
public void setUp(String awsCredentialsFile) throws Exception {
    // Reads the AWS key pair from the given properties file and wires up the
    // CloudWatch updater under test with a daemon single-thread scheduler.
    Properties properties = new Properties();
    // try-with-resources: the original leaked the FileInputStream.
    try (FileInputStream in = new FileInputStream(awsCredentialsFile)) {
        properties.load(in);
    }
    String awsAccessKey = properties.getProperty("aws.access-key");
    String awsSecretKey = properties.getProperty("aws.secret-key");
    AWSCredentials awsCredentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setDaemon(true).build());
    cloudWatchUpdater = new CloudWatchUpdater(new AmazonConfig(), new AmazonCloudWatchClient(awsCredentials), executor, new NodeInfo("test"));
}
Example 17
Project: ORCID-Source-master  File: S3Utils.java View source code
/**
 * Creates the S3 buckets for a given prefix in eu-west-1 (EU_Ireland).
 * For each API flavor (1.2/2.0 x json/xml) a bucket is created per trailing
 * character 0-9 plus 'x', skipping buckets that already exist.
 *
 * @param bucketPrefix common prefix for all bucket names
 * @param accessKey    AWS access key id
 * @param secretKey    AWS secret key
 */
public static void createBuckets(String bucketPrefix, String accessKey, String secretKey) {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3 = new AmazonS3Client(credentials);
    // Same four prefixes as before, in the same creation order; the loop
    // replaces four copy-pasted doesBucketExist/createBucket blocks.
    String[] prefixes = {
        bucketPrefix + "-api-1-2-json-",
        bucketPrefix + "-api-1-2-xml-",
        bucketPrefix + "-api-2-0-json-",
        bucketPrefix + "-api-2-0-xml-"
    };
    for (int i = 0; i <= 10; i++) {
        // Buckets are sharded by last character: digits 0-9, then 'x'.
        char lastCharacter = (i == 10 ? 'x' : Character.forDigit(i, 10));
        for (String prefix : prefixes) {
            String bucketName = prefix + lastCharacter;
            if (!s3.doesBucketExist(bucketName)) {
                s3.createBucket(bucketName, Region.EU_Ireland);
            }
        }
    }
}
Example 18
Project: S3-Blobs-module-for-Play-master  File: S3Blobs.java View source code
@Override
public void onApplicationStart() {
    // Honor the kill switch before doing any work.
    if (!ConfigHelper.getBoolean("s3.storage.enabled", true)) {
        Logger.info("S3Blobs module disabled");
        return;
    }
    Logger.info("Starting the S3Blobs module");
    // Fail fast on each missing mandatory configuration key.
    if (!Play.configuration.containsKey("aws.access.key")) {
        throw new ConfigurationException("Bad configuration for s3: no access key");
    }
    if (!Play.configuration.containsKey("aws.secret.key")) {
        throw new ConfigurationException("Bad configuration for s3: no secret key");
    }
    if (!Play.configuration.containsKey("s3.bucket")) {
        throw new ConfigurationException("Bad configuration for s3: no s3 bucket");
    }
    S3Blob.s3Bucket = Play.configuration.getProperty("s3.bucket");
    S3Blob.serverSideEncryption = ConfigHelper.getBoolean("s3.useServerSideEncryption", false);
    AWSCredentials awsCredentials = new BasicAWSCredentials(
            Play.configuration.getProperty("aws.access.key"),
            Play.configuration.getProperty("aws.secret.key"));
    S3Blob.s3Client = new AmazonS3Client(awsCredentials);
    // Create the bucket on first run.
    if (!S3Blob.s3Client.doesBucketExist(S3Blob.s3Bucket)) {
        S3Blob.s3Client.createBucket(S3Blob.s3Bucket);
    }
}
Example 19
Project: Scribengin-master  File: S3TestModule.java View source code
/*
   * (non-Javadoc)
   * 
   * @see com.google.inject.AbstractModule#configure()
   */
@Override
protected void configure() {
    if (mock) {
        // Mocked S3 with throwaway credentials; pre-create the target bucket.
        AmazonS3Mock s3Mock = new AmazonS3Mock(new BasicAWSCredentials("", ""));
        s3Mock.createBucket(descriptor.get("bucketName"));
        bind(AmazonS3.class).toInstance(s3Mock);
    } else {
        // Real S3 using the default profile from the credentials file.
        AWSCredentials credentials;
        try {
            credentials = new ProfileCredentialsProvider().getCredentials();
        } catch (Exception e) {
            throw new AmazonClientException("Cannot load the credentials from the credential profiles file. ", e);
        }
        bind(AmazonS3.class).toInstance(new AmazonS3Client(credentials));
    }
    System.out.println("region " + s3SinkConfig.getRegionName());
    SinkPartitioner partitioner = new OffsetPartitioner(s3SinkConfig.getOffsetPerPartition());
    bind(SinkPartitioner.class).toInstance(partitioner);
    bind(S3SinkConfig.class).toInstance(s3SinkConfig);
}
Example 20
Project: suro-master  File: TestS3FileSink.java View source code
@Override
protected void configure() {
    bind(ObjectMapper.class).to(DefaultObjectMapper.class);
    // Fixed test credentials exposed through a provider whose refresh is a no-op.
    AWSCredentialsProvider staticProvider = new AWSCredentialsProvider() {

        @Override
        public AWSCredentials getCredentials() {
            return new AWSCredentials() {

                @Override
                public String getAWSAccessKeyId() {
                    return "accessKey";
                }

                @Override
                public String getAWSSecretKey() {
                    return "secretKey";
                }
            };
        }

        @Override
        public void refresh() {
        }
    };
    bind(AWSCredentialsProvider.class).toInstance(staticProvider);
    // Stub the multipart uploader so "uploads" simply sleep for a second.
    MultipartUtils multipartUtils = mock(MultipartUtils.class);
    try {
        Answer slowNoOp = new Answer() {

            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                Thread.sleep(1000);
                return null;
            }
        };
        doAnswer(slowNoOp).when(multipartUtils).uploadObjects(any(String.class), any(RestS3Service.class), any(List.class), any(S3ServiceEventListener.class));
        bind(MultipartUtils.class).toInstance(multipartUtils);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    bind(SpaceChecker.class).toInstance(mock(SpaceChecker.class));
}
Example 21
Project: terraform-master  File: CredentialsAWS.java View source code
//----------------------------------------------------------------------------------------------
// Builds basic credentials from this object's key pair; both halves must be
// non-null and non-empty, otherwise the failure is logged and an NPE thrown.
private AWSCredentials createBasicAWSCredentials() throws NullPointerException {
    if (accessKey == null || "".equals(accessKey)) {
        log.error("access key for Credentials " + getName() + " is empty!");
        throw new NullPointerException("Empty Access Key");
    }
    if (secretKey == null || "".equals(secretKey)) {
        log.error("secret key for Credentials " + getName() + " is empty!");
        throw new NullPointerException("Empty Secret Key");
    }
    return new BasicAWSCredentials(accessKey, secretKey);
}
Example 22
Project: xebia-cloudcomputing-extras-master  File: CreateNginxProxyServers.java View source code
public static void main(String[] args) {
    AWSCredentials awsCredentials = AmazonAwsUtils.loadAwsCredentials();
    // EC2 pinned to the Ireland (eu-west-1) endpoint.
    AmazonEC2 ec2 = new AmazonEC2Client(awsCredentials);
    ec2.setEndpoint("ec2.eu-west-1.amazonaws.com");
    AmazonRoute53 route53 = new AmazonRoute53Client(awsCredentials);
    WorkshopInfrastructure workshopInfrastructure = new WorkshopInfrastructure()
            .withTeamIdentifiers("1")
            .withAwsAccessKeyId(awsCredentials.getAWSAccessKeyId())
            .withAwsSecretKey(awsCredentials.getAWSSecretKey())
            .withKeyPairName("nginx-workshop")
            .withBeanstalkNotificationEmail("slemesle@xebia.fr");
    CreateNginxProxyServers job = new CreateNginxProxyServers(ec2, route53, workshopInfrastructure);
    job.run();
/*
        List<Reservation> reservations = ec2.describeInstances(new DescribeInstancesRequest().withInstanceIds("i-7741eb3f")).getReservations();
        Instance instance = Iterables.getOnlyElement(Iterables.getOnlyElement(reservations).getInstances());

        Map<String, Instance> instancesByTeamId = Collections.singletonMap("clc", instance);

        job.bindInstancesToDnsCnames(instancesByTeamId, route53);
*/
}
Example 23
Project: aws-device-farm-gradle-plugin-master  File: DeviceFarmClientFactory.java View source code
// Builds the Device Farm API client. Uses the extension's static credentials
// by default, or assumes the configured role via STS when a role ARN is set.
public AWSDeviceFarmClient initializeApiClient(final DeviceFarmExtension extension) {
    final String roleArn = extension.getAuthentication().getRoleArn();
    AWSCredentials credentials = extension.getAuthentication();
    if (roleArn != null) {
        // Random session name so repeated runs don't collide.
        final STSAssumeRoleSessionCredentialsProvider sts = new STSAssumeRoleSessionCredentialsProvider.Builder(roleArn, RandomStringUtils.randomAlphanumeric(8)).build();
        credentials = sts.getCredentials();
    }
    final ClientConfiguration clientConfiguration = new ClientConfiguration().withUserAgent(String.format(extension.getUserAgent(), pluginVersion));
    final AWSDeviceFarmClient apiClient = new AWSDeviceFarmClient(credentials, clientConfiguration);
    apiClient.setServiceNameIntern("devicefarm");
    if (extension.getEndpointOverride() != null) {
        apiClient.setEndpoint(extension.getEndpointOverride());
    }
    return apiClient;
}
Example 24
Project: aws-iam-ldap-bridge-master  File: IAMSecretKeyValidator.java View source code
@Override
public boolean verifyIAMPassword(Entry user, String pw) throws LdapInvalidAttributeValueException, LdapAuthenticationException {
    // Validates the "password" against IAM: for role entries pw encodes
    // "accessKey|secretKey|sessionToken"; for user entries pw is the secret
    // key paired with the entry's accessKey attribute. Credentials are
    // considered valid if GetAccountSummary succeeds.
    boolean role = false;
    AWSCredentials creds;
    if (isRole(user)) {
        role = true;
        String[] parts = pw.split("\\|");
        // String.split never returns null, so only the arity needs checking
        // (the original also had a dead `parts == null` test).
        if (parts.length < 3)
            throw new LdapAuthenticationException();
        creds = new BasicSessionCredentials(parts[0], parts[1], parts[2]);
    } else {
        creds = new BasicAWSCredentials(user.get("accessKey").getString(), pw);
    }
    LOG.debug("Verifying {} {} with accessKey <hidden> and secretKey <hidden>", role ? "role" : "user", user.get("uid").getString());
    AmazonIdentityManagementClient client = new AmazonIdentityManagementClient(creds);
    try {
        client.getAccountSummary();
    } catch (AmazonClientException e) {
        System.err.println(e.getMessage());
        return false;
    } finally {
        client.shutdown();
    }
    return true;
}
Example 25
Project: aws-java-sdk-master  File: AwsConsoleApp.java View source code
/**
     * The only information needed to create a client are security credentials:
     * the AWS Access Key ID and Secret Access Key. All other configuration,
     * such as the service endpoints, is handled automatically. Client
     * parameters, such as proxies, can be supplied via an optional
     * ClientConfiguration object when constructing a client.
     *
     * @see com.amazonaws.auth.BasicAWSCredentials
     * @see com.amazonaws.auth.PropertiesCredentials
     * @see com.amazonaws.ClientConfiguration
     */
private static void init() throws Exception {
    // The [default] profile is loaded from the credentials file at
    // ~/.aws/credentials by the ProfileCredentialsProvider.
    AWSCredentials credentials;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e);
    }
    // Shared service clients for the rest of the app (class-level fields).
    ec2 = new AmazonEC2Client(credentials);
    s3 = new AmazonS3Client(credentials);
    sdb = new AmazonSimpleDBClient(credentials);
}
Example 26
Project: aws-sdk-java-master  File: AwsConsoleApp.java View source code
/**
     * Creating a client only requires security credentials — the AWS Access
     * Key ID and Secret Access Key. Everything else (service endpoints etc.)
     * is configured automatically; optional client parameters such as proxies
     * go in a ClientConfiguration passed to the client constructor.
     *
     * @see com.amazonaws.auth.BasicAWSCredentials
     * @see com.amazonaws.auth.PropertiesCredentials
     * @see com.amazonaws.ClientConfiguration
     */
private static void init() throws Exception {
    // ProfileCredentialsProvider reads the [default] profile from
    // ~/.aws/credentials.
    AWSCredentials credentials;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e);
    }
    // Populate the shared class-level service clients.
    ec2 = new AmazonEC2Client(credentials);
    s3 = new AmazonS3Client(credentials);
    sdb = new AmazonSimpleDBClient(credentials);
}
Example 27
Project: aws-toolkit-eclipse-master  File: AwsConsoleApp.java View source code
/**
     * Only security credentials (AWS Access Key ID and Secret Access Key) are
     * needed to create a client; endpoints and other configuration are set up
     * automatically. Proxies and similar client parameters can be provided in
     * an optional ClientConfiguration at construction time.
     *
     * @see com.amazonaws.auth.BasicAWSCredentials
     * @see com.amazonaws.auth.PropertiesCredentials
     * @see com.amazonaws.ClientConfiguration
     */
private static void init() throws Exception {
    // Credentials come from the [default] profile in ~/.aws/credentials.
    AWSCredentials credentials;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e);
    }
    // Initialize the shared class-level clients.
    ec2 = new AmazonEC2Client(credentials);
    s3 = new AmazonS3Client(credentials);
    sdb = new AmazonSimpleDBClient(credentials);
}
Example 28
Project: cloudpier-core-master  File: BeanstalkFirstDeployment.java View source code
/**
 * Handles the deploy action: lets the user pick a WAR file, uploads it to
 * the S3 bucket, then creates the Beanstalk application and application
 * version, and finally creates or updates the target environment.
 */
public void actionPerformed(ActionEvent ae) {
    JFileChooser fileChooser = new JFileChooser();
    int showOpenDialog = fileChooser.showOpenDialog(frame);
    if (showOpenDialog != JFileChooser.APPROVE_OPTION) {
        return;
    }
    //TODO if update it must not be called
    // Create the target bucket if it does not already exist.
    createAmazonS3Bucket();
    //TODO better to take the name from S3, not from the local file
    war_name_on_s3 = fileChooser.getSelectedFile().getName();
    System.out.println("war_name_on_s3=" + war_name_on_s3);
    // Mirrors transfer progress into the progress bar and reports failures.
    ProgressListener progressListener = new ProgressListener() {

        public void progressChanged(ProgressEvent progressEvent) {
            if (upload == null) {
                return;
            }
            pb.setValue((int) upload.getProgress().getPercentTransfered());
            switch(progressEvent.getEventCode()) {
                case ProgressEvent.COMPLETED_EVENT_CODE:
                    pb.setValue(100);
                    break;
                case ProgressEvent.FAILED_EVENT_CODE:
                    try {
                        AmazonClientException e = upload.waitForException();
                        JOptionPane.showMessageDialog(frame, "Unable to upload file to Amazon S3: " + e.getMessage(), "Error Uploading File", JOptionPane.ERROR_MESSAGE);
                    } catch (InterruptedException e) {
                        // FIX: was an empty catch; restore the interrupt flag
                        // instead of swallowing it.
                        Thread.currentThread().interrupt();
                    }
                    break;
            }
        }
    };
    File fileToUpload = fileChooser.getSelectedFile();
    PutObjectRequest request = new PutObjectRequest(bucketName, fileToUpload.getName(), fileToUpload).withProgressListener(progressListener);
    // TODO: should only be set after verifying the upload actually succeeded.
    uploaded_to_s3 = true;
    upload = tx.upload(request);
    System.out.println("Starting......");
    // Crude fixed wait to give the asynchronous upload time to make progress.
    try {
        Thread.sleep(15000);
    } catch (InterruptedException e) {
        // FIX: an interrupt here used to abort the deploy by throwing
        // OutOfMemoryError (a misuse of Error); now just restore the flag.
        Thread.currentThread().interrupt();
    }
    System.out.println("Ending......");
    // -------------------- CreateApplicationVersion --------------------
    if (uploaded_to_s3 == true) {
        System.out.println("starting CreateApplicationVersion");
        System.out.println("accessKeyId" + accessKeyId);
        System.out.println("secretAccessKey" + secretAccessKey);
        System.out.println("war_name_on_s3" + war_name_on_s3);
        System.out.println("bucketName" + bucketName);
        System.out.println("appname" + appname);
        System.out.println("appversion" + appversion);
        //CREATE APPLICATION - NO VERSION, NO WAR
        BeansCreateApplication bst_createapp = new BeansCreateApplication();
        try {
            bst_createapp.creatapp(accessKeyId, secretAccessKey, appname, "cloud4soa-created-app");
        } catch (Exception ex) {
            Logger.getLogger(BeanstalkFirstDeployment.class.getName()).log(Level.SEVERE, null, ex);
        }
        //NOW, CREATE APPLICATION VERSION
        BeansCreateApplicationVersion bst_createversion = new BeansCreateApplicationVersion();
        try {
            bst_createversion.creatappversion(accessKeyId, secretAccessKey, war_name_on_s3, bucketName, appname, appversion);
            version_updated = true;
        } catch (Exception ex) {
            Logger.getLogger(BeanstalkFirstDeployment.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    // Create the environment if it does not exist, otherwise update it.
    if (version_updated == true) {
        BeansCreateEnvironment bst_createenvironment = new BeansCreateEnvironment();
        try {
            bst_createenvironment.createupdateenvironment(accessKeyId, secretAccessKey, environment_name, appname, appversion, "descriptionUpdateby-app:" + appname + "-version:" + appversion);
        } catch (InvalidParameterValueException ex) {
            System.out.println("Enviroment already there!Starting BeansUpdateEnvironment");
            BeansUpdateEnvironment bst_updateenvironment = new BeansUpdateEnvironment();
            try {
                bst_updateenvironment.updateenvironment(accessKeyId, secretAccessKey, environment_name, appname, appversion, "descriptionUpdateby-app:" + appname + "-version:" + appversion);
            } catch (Exception ex2) {
                Logger.getLogger(BeanstalkDeploy.class.getName()).log(Level.SEVERE, null, ex2);
            }
        }
    }
}
Example 29
Project: IRISv2-master  File: IvonaSynthesiser.java View source code
/**
 * Synthesises the given text via the Ivona Speech Cloud and returns the
 * resulting MP3 audio stream.
 *
 * FIX: the previous version closed the stream in a finally block before the
 * caller could read it, so every caller received an already-closed stream.
 * The stream is now returned open; the caller is responsible for closing it.
 *
 * @param synthText the text to convert to speech
 * @return the open audio stream of the synthesised speech; caller must close it
 * @throws IOException kept in the signature for caller compatibility
 */
public InputStream getMP3Data(String synthText) throws IOException {
    // Credentials are read lazily from the plugin configuration.
    AWSCredentials credentials = new AWSCredentials() {

        @Override
        public String getAWSAccessKeyId() {
            return cfg.get("ivonaAccessKey");
        }

        @Override
        public String getAWSSecretKey() {
            return cfg.get("ivonaSecretKey");
        }
    };
    IvonaSpeechCloudClient speechCloud = new IvonaSpeechCloudClient(credentials);
    speechCloud.setEndpoint("https://tts.eu-west-1.ivonacloud.com");
    // Build the synthesis request from the configured voice and input text.
    CreateSpeechRequest createSpeechRequest = new CreateSpeechRequest();
    Input input = new Input();
    Voice voice = new Voice();
    voice.setName(cfg.get("ivonaVoice"));
    input.setData(synthText);
    createSpeechRequest.setInput(input);
    createSpeechRequest.setVoice(voice);
    CreateSpeechResult createSpeechResult = speechCloud.createSpeech(createSpeechRequest);
    LOGGER.debug("Success sending request:");
    LOGGER.debug(" content type:\t" + createSpeechResult.getContentType());
    LOGGER.debug(" request id:\t" + createSpeechResult.getTtsRequestId());
    LOGGER.debug(" request chars:\t" + createSpeechResult.getTtsRequestCharacters());
    LOGGER.debug(" request units:\t" + createSpeechResult.getTtsRequestUnits());
    System.out.println("\nStarting to retrieve audio stream:");
    // Return the stream open — do NOT close it here.
    return createSpeechResult.getBody();
}
Example 30
Project: jenkins-deployment-dashboard-plugin-master  File: AwsKeyCredentials.java View source code
/**
 * Validates the supplied AWS key pair by opening an EC2 connection and
 * checking whether the credentials are accepted.
 */
public FormValidation doTestAwsConnection(@QueryParameter("key") final String accessKey, @QueryParameter("secret") final Secret secretKey) {
    LOGGER.info("Verify AWS connection key " + accessKey);
    try {
        final AWSCredentials credentials = createCredentials(accessKey, secretKey.getPlainText());
        final EC2Connector connector = new EC2Connector(new AmazonEC2Client(credentials));
        if (connector.areAwsCredentialsValid()) {
            return FormValidation.ok(Messages.AwsKeyCredentials_awsConnectionSuccessful());
        }
        return FormValidation.warning(Messages.AwsKeyCredentials_awsConnectionFailed());
    } catch (Exception e) {
        LOGGER.severe(e.getMessage());
        return FormValidation.error(Messages.AwsKeyCredentials_awsConnectionCritical() + e.getMessage());
    }
}
Example 31
Project: logback-ext-master  File: AwsSupport.java View source code
/**
 * Builds the provider chain used to resolve AWS credentials: environment
 * variables first, then system properties, then the explicitly supplied
 * credentials (or NullCredentials when none were given), then the profile
 * file, and finally the EC2 instance profile.
 */
public AWSCredentialsProvider getCredentials(AWSCredentials credentials) {
    // Substitute NullCredentials when no explicit credentials were supplied.
    final AWSCredentials staticCredentials = (credentials == null) ? new NullCredentials() : credentials;
    return new AWSCredentialsProviderChain(
            new EnvironmentVariableCredentialsProvider(),
            new SystemPropertiesCredentialsProvider(),
            new StaticCredentialsProvider(staticCredentials),
            new ProfileCredentialsProvider(),
            new InstanceProfileCredentialsProvider());
}
Example 32
Project: open-tickdb-master  File: S3TickSourceFactory.java View source code
@Override
public List<String> list() throws IOException {
    // Lists every object key in the configured bucket.
    // FIX: a single listObjects call returns at most one page of results;
    // follow the pagination so buckets with many keys are listed completely.
    Logs.logDebug(log, "Listing s3 objects : %s", bucketName);
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3Client client = new AmazonS3Client(credentials);
    List<String> list = new ArrayList<>();
    ObjectListing listing = client.listObjects(bucketName);
    while (true) {
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            String key = summary.getKey();
            list.add(key);
            Logs.logTrace(log, "S3 object found : %s", key);
        }
        if (!listing.isTruncated()) {
            break;
        }
        // Fetch the next page of the listing.
        listing = client.listNextBatchOfObjects(listing);
    }
    Logs.logDebug(log, "Listed s3 objects : [%,3d] %s", list.size(), bucketName);
    return list;
}
Example 33
Project: primecloud-controller-master  File: AmazonAwsClientFactory.java View source code
/**
     * {@inheritDoc}
     */
@Override
public AmazonEC2 createEc2Client(String awsAccessId, String awsSecretKey) {
    // Build the raw EC2 client from the caller-supplied key pair.
    ClientConfiguration configuration = createConfiguration();
    AmazonEC2 ec2 = new AmazonEC2Client(new BasicAWSCredentials(awsAccessId, awsSecretKey), configuration);
    // Point the client at a custom endpoint when one is configured.
    if (host != null) {
        ec2.setEndpoint(AmazonEC2.ENDPOINT_PREFIX + "." + host);
    }
    // Wrap the client so its exceptions are handled uniformly.
    return new ExceptionHandleAwsClientWrapper().wrap(ec2);
}
Example 34
Project: publicplay-master  File: S3Plugin.java View source code
@Override
public void onStart() {
    // Reads the S3 settings from the application configuration and, when a
    // key pair is present, initialises the S3 client and ensures the
    // configured bucket exists.
    if (log.isDebugEnabled())
        log.debug("onStart <-");
    Configuration conf = application.configuration();
    String awsAccessKey = conf.getString(AWS_ACCESS_KEY);
    String awsSecretKey = conf.getString(AWS_SECRET_KEY);
    String bucketName = conf.getString(AWS_S3_BUCKET);
    if (log.isDebugEnabled())
        log.debug("accessKey : " + awsAccessKey);
    if (log.isDebugEnabled())
        log.debug("s3Bucket : " + bucketName);
    bucket = bucketName;
    // Without both keys there is nothing to initialise.
    if (awsAccessKey == null || awsSecretKey == null) {
        return;
    }
    AWSCredentials awsCredentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    // Honour JVM-level HTTP proxy settings when both host and port are set.
    String proxyHost = System.getProperty("http.proxyHost");
    String proxyPort = System.getProperty("http.proxyPort");
    if (proxyHost != null && proxyPort != null) {
        clientConfig.setProxyHost(proxyHost);
        clientConfig.setProxyPort(Integer.valueOf(proxyPort));
    }
    if (amazonS3 == null)
        amazonS3 = new AmazonS3Client(awsCredentials, clientConfig);
    if (amazonS3.doesBucketExist(bucketName)) {
        if (log.isDebugEnabled())
            log.debug("bucket exists: " + bucketName);
    } else {
        log.debug("bucket does not exist: " + bucketName);
        amazonS3.createBucket(bucketName, Region.EU_Ireland);
        log.debug("bucket created: " + bucketName);
    }
    log.info("Using S3 Bucket: " + bucketName);
}
Example 35
Project: airship-master  File: AwsProvisionerModule.java View source code
@Provides
@Singleton
public AWSCredentials provideAwsCredentials(AwsProvisionerConfig provisionerConfig) throws IOException {
    // Loads the AWS key pair from the properties file named in the
    // provisioner configuration. Both aws.access-key and aws.secret-key
    // must be present.
    File credentialsFile = new File(provisionerConfig.getAwsCredentialsFile());
    Properties properties = new Properties();
    // try-with-resources guarantees the stream is closed even if load() fails.
    try (FileInputStream in = new FileInputStream(credentialsFile)) {
        properties.load(in);
    }
    String accessKey = properties.getProperty("aws.access-key");
    Preconditions.checkArgument(accessKey != null, "aws credentials file does not contain a value for aws.access-key");
    String secretKey = properties.getProperty("aws.secret-key");
    Preconditions.checkArgument(secretKey != null, "aws credentials file does not contain a value for aws.secret-key");
    return new BasicAWSCredentials(accessKey, secretKey);
}
Example 36
Project: aws-sdk-android-master  File: AWSS3V4SignerTest.java View source code
@Test
public void testSignPutObject() throws URISyntaxException {
    // Uses a signer with a fixed signing date so the expected signature is
    // deterministic and can be asserted against a precomputed value.
    AWSS3V4Signer signer = new S3SignerWithDateOverride(new Date(1431115356859L));
    // THESE ARE BOGUS CREDENTIALS
    AWSCredentials credentials = new BasicAWSCredentials("AKIAJd4scjDDmxXZTESTGOZQ", "LYd/ad4scjDDmxXZTESTtRz7xdOM1SiD6");
    // Build a small in-memory PUT of the literal body "content" to a fixed
    // bucket/key; any change to these bytes or headers changes the signature.
    ByteArrayInputStream bais = new ByteArrayInputStream("content".getBytes(StringUtils.UTF8));
    ObjectMetadata om = new ObjectMetadata();
    om.setContentLength("content".getBytes(StringUtils.UTF8).length);
    PutObjectRequest por = new PutObjectRequest("test-bucket123456", "key", bais, om);
    Request<?> pr = new DefaultRequest(por, Constants.S3_SERVICE_NAME);
    pr.setContent(bais);
    pr.setResourcePath("key");
    pr.setHttpMethod(HttpMethodName.PUT);
    pr.addHeader(Headers.CONTENT_LENGTH, String.valueOf(bais.available()));
    pr.setEndpoint(new URI("https://test-bucket123456.s3-us-west-2.amazonaws.com"));
    pr.addHeader("Host", "test-bucket123456.s3-us-west-2.amazonaws.com");
    // Sign the request and compare against the precomputed SigV4 signature
    // for exactly these inputs and this signing date.
    signer.sign(pr, credentials);
    assertEquals(getSignature(pr), "e0a8ac165c54dc1fc3dd987f5e00b44f1b91f3c63b05ee642432e1f3c7286d69");
}
Example 37
Project: BoxMeBackend-master  File: InlineTaggingCodeSampleApp.java View source code
/**
 * Entry point: requests a single t1.micro Spot Instance, tags the Spot
 * request, polls until the request leaves the "open" state, tags the
 * launched instance, then cancels the request and terminates the instance.
 *
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
    //============================================================================================//
    //=============================== Submitting a Request =======================================//
    //============================================================================================//
    // Retrieves the credentials from an AwsCredentials.properties file on the classpath.
    AWSCredentials credentials = null;
    try {
        credentials = new PropertiesCredentials(InlineTaggingCodeSampleApp.class.getResourceAsStream("AwsCredentials.properties"));
    } catch (IOException e1) {
        System.out.println("Credentials were not properly entered into AwsCredentials.properties.");
        System.out.println(e1.getMessage());
        System.exit(-1);
    }
    // Create the AmazonEC2Client object so we can call various APIs.
    AmazonEC2 ec2 = new AmazonEC2Client(credentials);
    // Initializes a Spot Instance Request.
    RequestSpotInstancesRequest requestRequest = new RequestSpotInstancesRequest();
    // Request 1 x t1.micro instance with a bid price of $0.03.
    requestRequest.setSpotPrice("0.03");
    requestRequest.setInstanceCount(Integer.valueOf(1));
    // Setup the specifications of the launch. This includes the instance type (e.g. t1.micro)
    // and the latest Amazon Linux AMI id available. Note, you should always use the latest
    // Amazon Linux AMI id or another of your choosing.
    LaunchSpecification launchSpecification = new LaunchSpecification();
    launchSpecification.setImageId("ami-8c1fece5");
    launchSpecification.setInstanceType("t1.micro");
    // Add the security group to the request.
    ArrayList<String> securityGroups = new ArrayList<String>();
    securityGroups.add("GettingStartedGroup");
    launchSpecification.setSecurityGroups(securityGroups);
    // Add the launch specifications to the request.
    requestRequest.setLaunchSpecification(launchSpecification);
    //============================================================================================//
    //=========================== Getting the Request ID from the Request ========================//
    //============================================================================================//
    // Call the RequestSpotInstance API.
    RequestSpotInstancesResult requestResult = ec2.requestSpotInstances(requestRequest);
    List<SpotInstanceRequest> requestResponses = requestResult.getSpotInstanceRequests();
    // Collect all of the request ids we want to watch until they leave the
    // open state.
    ArrayList<String> spotInstanceRequestIds = new ArrayList<String>();
    for (SpotInstanceRequest requestResponse : requestResponses) {
        System.out.println("Created Spot Request: " + requestResponse.getSpotInstanceRequestId());
        spotInstanceRequestIds.add(requestResponse.getSpotInstanceRequestId());
    }
    //============================================================================================//
    //====================================== Tag the Spot Requests ===============================//
    //============================================================================================//
    // Create the list of tags we want to create.
    ArrayList<Tag> requestTags = new ArrayList<Tag>();
    requestTags.add(new Tag("keyname1", "value1"));
    // Create a tag request for requests.
    CreateTagsRequest createTagsRequest_requests = new CreateTagsRequest();
    createTagsRequest_requests.setResources(spotInstanceRequestIds);
    createTagsRequest_requests.setTags(requestTags);
    // Try to tag the Spot request submitted.
    try {
        ec2.createTags(createTagsRequest_requests);
    } catch (AmazonServiceException e) {
        // FIX: this catch handles a tagging failure; the message previously
        // said "Error terminating instances", which was misleading.
        System.out.println("Error tagging spot requests");
        System.out.println("Caught Exception: " + e.getMessage());
        System.out.println("Reponse Status Code: " + e.getStatusCode());
        System.out.println("Error Code: " + e.getErrorCode());
        System.out.println("Request ID: " + e.getRequestId());
    }
    //============================================================================================//
    //=========================== Determining the State of the Spot Request ======================//
    //============================================================================================//
    // Tracks whether there are any requests still in the open state.
    boolean anyOpen;
    // Instance ids launched by the fulfilled requests.
    ArrayList<String> instanceIds = new ArrayList<String>();
    do {
        // FIX: rebuild the id list each poll; it previously accumulated
        // duplicate instance ids across polling iterations.
        instanceIds.clear();
        // Create the describeRequest with all of the request ids to monitor.
        DescribeSpotInstanceRequestsRequest describeRequest = new DescribeSpotInstanceRequestsRequest();
        describeRequest.setSpotInstanceRequestIds(spotInstanceRequestIds);
        // Assume no requests are open unless we find one that still is.
        anyOpen = false;
        try {
            // Retrieve all of the requests we want to monitor.
            DescribeSpotInstanceRequestsResult describeResult = ec2.describeSpotInstanceRequests(describeRequest);
            List<SpotInstanceRequest> describeResponses = describeResult.getSpotInstanceRequests();
            // Look through each request and determine if they are all out of the open state.
            for (SpotInstanceRequest describeResponse : describeResponses) {
                // A request can also end up closed/cancelled, so we compare
                // against "open" instead of "active".
                if (describeResponse.getState().equals("open")) {
                    anyOpen = true;
                    break;
                }
                // Add the instance id to the list we will eventually terminate.
                instanceIds.add(describeResponse.getInstanceId());
            }
        } catch (AmazonServiceException e) {
            // If the describe call fails, keep polling.
            anyOpen = true;
        }
        try {
            // Sleep for 60 seconds between polls.
            Thread.sleep(60 * 1000);
        } catch (InterruptedException e) {
            // FIX: was an empty catch; restore the interrupt flag.
            Thread.currentThread().interrupt();
        }
    } while (anyOpen);
    //============================================================================================//
    //====================================== Tag the Spot Instances ==============================//
    //============================================================================================//
    // Create the list of tags we want to create.
    ArrayList<Tag> instanceTags = new ArrayList<Tag>();
    instanceTags.add(new Tag("keyname1", "value1"));
    // Create a tag request for instances.
    CreateTagsRequest createTagsRequest_instances = new CreateTagsRequest();
    createTagsRequest_instances.setResources(instanceIds);
    createTagsRequest_instances.setTags(instanceTags);
    // Try to tag the Spot instance started.
    try {
        ec2.createTags(createTagsRequest_instances);
    } catch (AmazonServiceException e) {
        // FIX: message previously said "Error terminating instances" here.
        System.out.println("Error tagging instances");
        System.out.println("Caught Exception: " + e.getMessage());
        System.out.println("Reponse Status Code: " + e.getStatusCode());
        System.out.println("Error Code: " + e.getErrorCode());
        System.out.println("Request ID: " + e.getRequestId());
    }
    try {
        // Cancel requests.
        CancelSpotInstanceRequestsRequest cancelRequest = new CancelSpotInstanceRequestsRequest(spotInstanceRequestIds);
        ec2.cancelSpotInstanceRequests(cancelRequest);
    } catch (AmazonServiceException e) {
        System.out.println("Error cancelling instances");
        System.out.println("Caught Exception: " + e.getMessage());
        System.out.println("Reponse Status Code: " + e.getStatusCode());
        System.out.println("Error Code: " + e.getErrorCode());
        System.out.println("Request ID: " + e.getRequestId());
    }
    //============================================================================================//
    try {
        // Terminate instances.
        TerminateInstancesRequest terminateRequest = new TerminateInstancesRequest(instanceIds);
        ec2.terminateInstances(terminateRequest);
    } catch (AmazonServiceException e) {
        System.out.println("Error terminating instances");
        System.out.println("Caught Exception: " + e.getMessage());
        System.out.println("Reponse Status Code: " + e.getStatusCode());
        System.out.println("Error Code: " + e.getErrorCode());
        System.out.println("Request ID: " + e.getRequestId());
    }
}
Example 38
Project: camel-master  File: SqsEndpoint.java View source code
/**
     * Provide the possibility to override this method for a mock implementation.
     *
     * FIX: the no-credentials branches were previously inverted — with a
     * proxy configured it built the client WITHOUT the configuration, and
     * without a proxy it passed a null ClientConfiguration.
     *
     * @return AmazonSQSClient
     */
AmazonSQS createClient() {
    AmazonSQS client = null;
    ClientConfiguration clientConfiguration = null;
    boolean isClientConfigFound = false;
    // Build a client configuration only when a proxy is configured.
    if (ObjectHelper.isNotEmpty(configuration.getProxyHost()) && ObjectHelper.isNotEmpty(configuration.getProxyPort())) {
        clientConfiguration = new ClientConfiguration();
        clientConfiguration.setProxyHost(configuration.getProxyHost());
        clientConfiguration.setProxyPort(configuration.getProxyPort());
        isClientConfigFound = true;
    }
    if (configuration.getAccessKey() != null && configuration.getSecretKey() != null) {
        AWSCredentials credentials = new BasicAWSCredentials(configuration.getAccessKey(), configuration.getSecretKey());
        if (isClientConfigFound) {
            client = new AmazonSQSClient(credentials, clientConfiguration);
        } else {
            client = new AmazonSQSClient(credentials);
        }
    } else {
        // No explicit credentials: fall back to the client's default chain.
        if (isClientConfigFound) {
            client = new AmazonSQSClient(clientConfiguration);
        } else {
            client = new AmazonSQSClient();
        }
    }
    return client;
}
Example 39
Project: cas-master  File: DynamoDbCloudConfigBootstrapConfiguration.java View source code
/**
 * Builds an AmazonDynamoDBClient from the Spring environment settings:
 * credentials, optional local bind address, endpoint, region, and signer
 * region override.
 *
 * FIX: removed the dead {@code credentials == null} branch — a freshly
 * constructed BasicAWSCredentials can never be null, so that branch was
 * unreachable.
 */
private static AmazonDynamoDBClient getAmazonDynamoDbClient(final Environment environment) {
    final ClientConfiguration cfg = new ClientConfiguration();
    try {
        // Optionally bind outgoing requests to a specific local address.
        final String localAddress = getSetting(environment, "localAddress");
        if (StringUtils.isNotBlank(localAddress)) {
            cfg.setLocalAddress(InetAddress.getByName(localAddress));
        }
    } catch (final Exception e) {
        LOGGER.error(e.getMessage(), e);
    }
    final String key = getSetting(environment, "credentialAccessKey");
    final String secret = getSetting(environment, "credentialSecretKey");
    final AWSCredentials credentials = new BasicAWSCredentials(key, secret);
    final AmazonDynamoDBClient client = new AmazonDynamoDBClient(credentials, cfg);
    final String endpoint = getSetting(environment, "endpoint");
    if (StringUtils.isNotBlank(endpoint)) {
        client.setEndpoint(endpoint);
    }
    final String region = getSetting(environment, "region");
    if (StringUtils.isNotBlank(region)) {
        client.setRegion(Region.getRegion(Regions.valueOf(region)));
    }
    final String regionOverride = getSetting(environment, "regionOverride");
    if (StringUtils.isNotBlank(regionOverride)) {
        client.setSignerRegionOverride(regionOverride);
    }
    return client;
}
Example 40
Project: cloudify-master  File: S3AWSAPIDeployMojo.java View source code
/**
 * Uploads the configured source artifact to the target S3 container using
 * the AWS SDK; any failure is wrapped in a MojoFailureException.
 *
 * FIX: removed the dead {@code BlobStoreContext context} local — it was
 * never assigned, so the finally block's close could never execute.
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    try {
        getLog().info("Using aws-sdk-java");
        AWSCredentials awsCredentials = new BasicAWSCredentials(user, key);
        AmazonS3 s3 = new AmazonS3Client(awsCredentials);
        uploadFile(s3, container, target, source);
    } catch (Exception e) {
        throw new MojoFailureException("Failed put operation", e);
    }
}
Example 41
Project: cloudpier-adapters-master  File: BeanstalkFirstDeployment.java View source code
/**
 * Handles the deploy action: lets the user pick a WAR file, uploads it to
 * the S3 bucket, then creates the Beanstalk application and application
 * version, and finally creates or updates the target environment.
 */
public void actionPerformed(ActionEvent ae) {
    JFileChooser fileChooser = new JFileChooser();
    int showOpenDialog = fileChooser.showOpenDialog(frame);
    if (showOpenDialog != JFileChooser.APPROVE_OPTION) {
        return;
    }
    //TODO if update it must not be called
    // Create the target bucket if it does not already exist.
    createAmazonS3Bucket();
    //TODO better to take the name from S3, not from the local file
    war_name_on_s3 = fileChooser.getSelectedFile().getName();
    System.out.println("war_name_on_s3=" + war_name_on_s3);
    // Mirrors transfer progress into the progress bar and reports failures.
    ProgressListener progressListener = new ProgressListener() {

        public void progressChanged(ProgressEvent progressEvent) {
            if (upload == null) {
                return;
            }
            pb.setValue((int) upload.getProgress().getPercentTransfered());
            switch(progressEvent.getEventCode()) {
                case ProgressEvent.COMPLETED_EVENT_CODE:
                    pb.setValue(100);
                    break;
                case ProgressEvent.FAILED_EVENT_CODE:
                    try {
                        AmazonClientException e = upload.waitForException();
                        JOptionPane.showMessageDialog(frame, "Unable to upload file to Amazon S3: " + e.getMessage(), "Error Uploading File", JOptionPane.ERROR_MESSAGE);
                    } catch (InterruptedException e) {
                        // FIX: was an empty catch; restore the interrupt flag
                        // instead of swallowing it.
                        Thread.currentThread().interrupt();
                    }
                    break;
            }
        }
    };
    File fileToUpload = fileChooser.getSelectedFile();
    PutObjectRequest request = new PutObjectRequest(bucketName, fileToUpload.getName(), fileToUpload).withProgressListener(progressListener);
    // TODO: should only be set after verifying the upload actually succeeded.
    uploaded_to_s3 = true;
    upload = tx.upload(request);
    System.out.println("Starting......");
    // Crude fixed wait to give the asynchronous upload time to make progress.
    try {
        Thread.sleep(15000);
    } catch (InterruptedException e) {
        // FIX: an interrupt here used to abort the deploy by throwing
        // OutOfMemoryError (a misuse of Error); now just restore the flag.
        Thread.currentThread().interrupt();
    }
    System.out.println("Ending......");
    // -------------------- CreateApplicationVersion --------------------
    if (uploaded_to_s3 == true) {
        System.out.println("starting CreateApplicationVersion");
        System.out.println("accessKeyId" + accessKeyId);
        System.out.println("secretAccessKey" + secretAccessKey);
        System.out.println("war_name_on_s3" + war_name_on_s3);
        System.out.println("bucketName" + bucketName);
        System.out.println("appname" + appname);
        System.out.println("appversion" + appversion);
        //CREATE APPLICATION - NO VERSION, NO WAR
        BeansCreateApplication bst_createapp = new BeansCreateApplication();
        try {
            bst_createapp.creatapp(accessKeyId, secretAccessKey, appname, "cloud4soa-created-app");
        } catch (Exception ex) {
            Logger.getLogger(BeanstalkFirstDeployment.class.getName()).log(Level.SEVERE, null, ex);
        }
        //NOW, CREATE APPLICATION VERSION
        BeansCreateApplicationVersion bst_createversion = new BeansCreateApplicationVersion();
        try {
            bst_createversion.creatappversion(accessKeyId, secretAccessKey, war_name_on_s3, bucketName, appname, appversion);
            version_updated = true;
        } catch (Exception ex) {
            Logger.getLogger(BeanstalkFirstDeployment.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    // Create the environment if it does not exist, otherwise update it.
    if (version_updated == true) {
        BeansCreateEnvironment bst_createenvironment = new BeansCreateEnvironment();
        try {
            bst_createenvironment.createupdateenvironment(accessKeyId, secretAccessKey, environment_name, appname, appversion, "descriptionUpdateby-app:" + appname + "-version:" + appversion);
        } catch (InvalidParameterValueException ex) {
            System.out.println("Enviroment already there!Starting BeansUpdateEnvironment");
            BeansUpdateEnvironment bst_updateenvironment = new BeansUpdateEnvironment();
            try {
                bst_updateenvironment.updateenvironment(accessKeyId, secretAccessKey, environment_name, appname, appversion, "descriptionUpdateby-app:" + appname + "-version:" + appversion);
            } catch (Exception ex2) {
                Logger.getLogger(BeanstalkDeploy.class.getName()).log(Level.SEVERE, null, ex2);
            }
        }
    }
}
Example 42
Project: enhanced-snapshots-master  File: AmazonConfigProviderDEV.java View source code
@Bean
public AWSCredentials awsCredentials() {
    // Lazily decrypt the stored key pair and cache the resulting
    // credentials on first access.
    if (awsCredentials == null) {
        awsCredentials = new BasicAWSCredentials(
                cryptoService.decrypt(configurationId, amazonAWSAccessKey),
                cryptoService.decrypt(configurationId, amazonAWSSecretKey));
    }
    return awsCredentials;
}
Example 43
Project: hadoop-release-2.6.0-master  File: ITestS3ATemporaryCredentials.java View source code
@Test
public void testTemporaryCredentialValidation() throws Throwable {
    // A provider configured with an empty session token must refuse to
    // hand out credentials.
    final Configuration conf = new Configuration();
    conf.set(ACCESS_KEY, "accesskey");
    conf.set(SECRET_KEY, "secretkey");
    conf.set(SESSION_TOKEN, "");
    final TemporaryAWSCredentialsProvider provider =
            new TemporaryAWSCredentialsProvider(getFileSystem().getUri(), conf);
    try {
        fail("Expected a CredentialInitializationException," + " got " + provider.getCredentials());
    } catch (CredentialInitializationException expected) {
        // expected outcome — nothing to do
    }
}
Example 44
Project: judochop-master  File: AmazonUtils.java View source code
/**
     * Builds an AmazonEC2Client forced onto HTTPS. When both keys are
     * supplied they are wrapped in a static credentials provider; otherwise
     * the SDK's DefaultAWSCredentialsProviderChain is used to resolve
     * credentials.
     *
     * @param accessKey the AWS access key id, or null to use the default chain
     * @param secretKey the AWS secret key, or null to use the default chain
     * @return a configured AmazonEC2Client
     */
public static AmazonEC2Client getEC2Client(String accessKey, String secretKey) {
    AWSCredentialsProvider provider;
    // Explicit key pair wins; otherwise fall back to the default chain.
    if (accessKey != null && secretKey != null) {
        AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
        provider = new StaticCredentialsProvider(credentials);
    } else {
        provider = new DefaultAWSCredentialsProviderChain();
    }
    AmazonEC2Client client = new AmazonEC2Client(provider);
    // Force HTTPS for all EC2 calls.
    ClientConfiguration configuration = new ClientConfiguration();
    configuration.setProtocol(Protocol.HTTPS);
    client.setConfiguration(configuration);
    return client;
}
Example 45
Project: megam_chef-master  File: S3.java View source code
/**
 * <p>download.</p>
 *
 * Downloads every non-empty object under the {@code vl + "/"} prefix of the
 * configured bucket into the local vault directory.
 *
 * @param vl a {@link java.lang.String} object; used as the S3 key prefix.
 * @throws org.megam.chef.exception.ProvisionerException if any.
 */
public static void download(String vl) throws ProvisionerException {
    String bucketName = Constants.BUCKET_NAME;
    AWSCredentials credentials = new BasicAWSCredentials(Constants.MEGAM_AWS_ACCESS_KEY, Constants.MEGAM_AWS_SECRET_ID);
    AmazonS3 conn = new AmazonS3Client(credentials);
    conn.setEndpoint("s3-ap-southeast-1.amazonaws.com");
    logger.debug("Download ..." + vl);
    ObjectListing objectListing = conn.listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix(vl + "/").withDelimiter("/"));
    // FIX: listObjects returns at most one page (~1000 keys). Follow the
    // truncation marker so prefixes with many objects are fully downloaded;
    // previously only the first page was processed.
    while (true) {
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            // Zero-size entries are "directory" placeholder keys; skip them.
            if (objectSummary.getSize() > 0) {
                logger.debug("Download ...----" + objectSummary.getKey());
                conn.getObject(new GetObjectRequest(bucketName, objectSummary.getKey()), new File(Constants.MEGAM_VAULT + bucketName + java.io.File.separator + objectSummary.getKey()));
            }
        }
        if (!objectListing.isTruncated()) {
            break;
        }
        objectListing = conn.listNextBatchOfObjects(objectListing);
    }
    logger.debug("Download completed.....");
}
Example 46
Project: reinvent2013-mobile-photo-share-master  File: Configuration.java View source code
/**
 * Resolves the AWS account id for the configured key pair via IAM, or
 * returns null when either key is missing.
 */
private static String getAWSAccountID() {
    try {
        final String accessKey = AWS_ACCESS_KEY_ID;
        final String secretKey = AWS_SECRET_KEY;
        // Without both keys the lookup cannot be performed.
        if (Utilities.isEmpty(accessKey) || Utilities.isEmpty(secretKey)) {
            return null;
        }
        AmazonIdentityManagementClient iam =
                new AmazonIdentityManagementClient(new BasicAWSCredentials(accessKey, secretKey));
        // The account id is the fifth ':'-separated field of the caller's ARN,
        // e.g. arn:aws:iam::123456789012:user/name.
        String arn = iam.getUser().getUser().getArn();
        return arn.split(":")[4];
    } catch (AmazonClientException e) {
        throw new RuntimeException("Failed to get AWS account id", e);
    }
}
Example 47
Project: SecureShareLib-master  File: S3SiteController.java View source code
/**
 * Uploads the file at {@code mediaPaths[0]} to S3 via a TransferManager.
 * Reports failures through {@code jobFailed} and returns null in that case.
 */
@Override
protected UploadResult doInBackground(String... mediaPaths) {
    UploadResult result = null;
    if (null == mediaPaths[0]) {
        jobFailed(null, 7000000, "S3 media path is null");
        return result;
    }
    File mediaFile = new File(mediaPaths[0]);
    if (!mediaFile.exists()) {
        jobFailed(null, 7000001, "S3 media path invalid");
        return result;
    }
    TransferManager transferManager = null;
    try {
        final AWSCredentials credentials = new BasicAWSCredentials(mContext.getString(R.string.s3_key), mContext.getString(R.string.s3_secret));
        Log.i(TAG, "upload file: " + mediaFile.getName());
        AmazonS3Client s3Client = new AmazonS3Client(credentials, s3Config);
        transferManager = new TransferManager(s3Client);
        Upload upload = transferManager.upload(bucket, pathPrefix + mediaFile.getName(), mediaFile);
        result = upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // FIX: restore the interrupt status instead of swallowing it, so the
        // executor running this task can observe the interruption.
        Thread.currentThread().interrupt();
        Timber.e("upload error: " + e.getMessage());
        jobFailed(null, 7000002, "S3 upload failed: " + e.getMessage());
    } catch (Exception e) {
        Timber.e("upload error: " + e.getMessage());
        jobFailed(null, 7000002, "S3 upload failed: " + e.getMessage());
    } finally {
        // FIX: release the TransferManager's internal thread pool; it was
        // previously leaked on every invocation.
        if (transferManager != null) {
            transferManager.shutdownNow();
        }
    }
    return result;
}
Example 48
Project: spark-cstar-canaries-master  File: Producer.java View source code
/**
 * Loads AWS credentials from the 'default' profile (~/.aws/credentials).
 *
 * @throws AmazonClientException if the profile is missing or unreadable
 */
public static AWSCredentials getCreds() throws Exception {
    try {
        return new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load AWS credentials, no 'default' profile available.", e);
    }
}
Example 49
Project: storm-crawler-master  File: AbstractS3CacheBolt.java View source code
/** Returns an S3 client given the configuration **/
public static AmazonS3Client getS3Client(Map conf) {
    // Credentials are resolved by the SDK's default lookup chain
    // (env vars, system properties, profile file, instance metadata, ...).
    AWSCredentials credentials = new DefaultAWSCredentialsProviderChain().getCredentials();
    AmazonS3Client client = new AmazonS3Client(credentials, new ClientConfiguration());
    // Optional region override from the topology configuration.
    String regionName = ConfUtils.getString(conf, REGION);
    if (StringUtils.isNotBlank(regionName)) {
        client.setRegion(RegionUtils.getRegion(regionName));
    }
    // Optional endpoint override (e.g. an S3-compatible store).
    String endpoint = ConfUtils.getString(conf, ENDPOINT);
    if (StringUtils.isNotBlank(endpoint)) {
        client.setEndpoint(endpoint);
    }
    return client;
}
Example 50
Project: storm-s3-master  File: UploaderFactory.java View source code
/**
 * Builds an {@code Uploader} backed by an S3 client configured from the
 * topology conf: protocol, optional proxy host/port, optional endpoint.
 */
public static Uploader buildUploader(Map conf) {
    Protocol protocol = Protocol.HTTPS;
    String proxy = null;
    int proxyPort = 0;
    if (conf.containsKey(S3_PROTOCOL)) {
        protocol = Protocol.valueOf((String) conf.get(S3_PROTOCOL));
    }
    if (conf.containsKey(S3_PROXY)) {
        proxy = (String) conf.get(S3_PROXY);
    }
    if (conf.containsKey(S3_PROXY_PORT)) {
        // FIX: the port may deserialize as Integer or Long depending on the
        // config source; casting through Number avoids a ClassCastException
        // that the previous hard (Long) cast could throw.
        proxyPort = ((Number) conf.get(S3_PROXY_PORT)).intValue();
    }
    AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();
    AWSCredentials credentials = provider.getCredentials();
    ClientConfiguration config = new ClientConfiguration().withProtocol(protocol);
    if (proxy != null) {
        config.withProxyHost(proxy);
    }
    // Port 0 means "not configured"; leave the default in that case.
    if (proxyPort != 0) {
        config.withProxyPort(proxyPort);
    }
    AmazonS3 client = new AmazonS3Client(credentials, config);
    if (conf.containsKey(S3_ENDPOINT)) {
        client.setEndpoint((String) conf.get(S3_ENDPOINT));
    }
    return getUploader(conf, client);
}
Example 51
Project: testcases-master  File: CommonCallbackHandler.java View source code
/**
 * Services {@code WSPasswordCallback.SECRET_KEY} callbacks using AWS KMS.
 *
 * When the callback already carries an encrypted secret, it is unwrapped
 * via KMS Decrypt and the plaintext key is handed back. Otherwise a fresh
 * data key is generated under the configured master key: the plaintext is
 * set on the callback for immediate use, the KMS-wrapped form is stored
 * for later decryption, and a ds:KeyInfo/KeyName element naming the
 * encryption key is attached.
 *
 * Callbacks that are not {@code WSPasswordCallback}, or whose usage is not
 * SECRET_KEY, are silently ignored.
 */
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    for (int i = 0; i < callbacks.length; i++) {
        if (callbacks[i] instanceof WSPasswordCallback) {
            WSPasswordCallback pc = (WSPasswordCallback) callbacks[i];
            if (pc.getUsage() == WSPasswordCallback.SECRET_KEY) {
                // KMS client bound to the configured endpoint with static credentials.
                final AWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);
                AWSKMSClient kms = new AWSKMSClient(creds);
                kms.setEndpoint(endpoint);
                if (pc.getEncryptedSecret() != null) {
                    // Decrypt path: unwrap the provided encrypted secret via KMS.
                    ByteBuffer encryptedKey = ByteBuffer.wrap(pc.getEncryptedSecret());
                    DecryptRequest req = new DecryptRequest().withCiphertextBlob(encryptedKey);
                    ByteBuffer plaintextKey = kms.decrypt(req).getPlaintext();
                    byte[] key = new byte[plaintextKey.remaining()];
                    plaintextKey.get(key);
                    pc.setKey(key);
                } else {
                    // Generate path: request a new data key under the master key.
                    GenerateDataKeyRequest dataKeyRequest = new GenerateDataKeyRequest();
                    dataKeyRequest.setKeyId(masterKeyId);
                    // Default to AES-128; upgrade when the callback's algorithm
                    // URI mentions aes256.
                    String algorithm = "AES_128";
                    if (pc.getAlgorithm() != null && pc.getAlgorithm().contains("aes256")) {
                        algorithm = "AES_256";
                    }
                    dataKeyRequest.setKeySpec(algorithm);
                    GenerateDataKeyResult dataKeyResult = kms.generateDataKey(dataKeyRequest);
                    // Plaintext key goes to the caller for immediate use...
                    ByteBuffer plaintextKey = dataKeyResult.getPlaintext();
                    byte[] key = new byte[plaintextKey.remaining()];
                    plaintextKey.get(key);
                    pc.setKey(key);
                    // ...while the KMS-wrapped form is stored for later decryption.
                    ByteBuffer encryptedKey = dataKeyResult.getCiphertextBlob();
                    byte[] encKey = new byte[encryptedKey.remaining()];
                    encryptedKey.get(encKey);
                    pc.setEncryptedSecret(encKey);
                    // Create a KeyName pointing to the encryption key
                    Document doc = DOMUtils.newDocument();
                    Element keyInfoElement = doc.createElementNS(WSConstants.SIG_NS, WSConstants.SIG_PREFIX + ":" + WSConstants.KEYINFO_LN);
                    keyInfoElement.setAttributeNS(WSConstants.XMLNS_NS, "xmlns:" + WSConstants.SIG_PREFIX, WSConstants.SIG_NS);
                    Element keyNameElement = doc.createElementNS(WSConstants.SIG_NS, WSConstants.SIG_PREFIX + ":KeyName");
                    keyNameElement.setTextContent("1c84a3f2-51cc-4c66-9045-68f51ef8b1eb");
                    keyInfoElement.appendChild(keyNameElement);
                    pc.setKeyInfoReference(keyInfoElement);
                }
            }
        }
    }
}
Example 52
Project: user-master  File: AmazonUtils.java View source code
/**
 * Creates an EC2 client.
 *
 * @param accessKey explicit AWS access key id; may be null
 * @param secretKey explicit AWS secret key; may be null
 * @return an {@code AmazonEC2Client} forced onto HTTPS
 */
public static AmazonEC2Client getEC2Client(String accessKey, String secretKey) {
    AWSCredentialsProvider provider;
    boolean hasExplicitKeys = accessKey != null && secretKey != null;
    if (hasExplicitKeys) {
        provider = new StaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));
    } else {
        // No explicit keys: resolve via env vars / system properties /
        // profile file / instance role.
        provider = new DefaultAWSCredentialsProviderChain();
    }
    ClientConfiguration httpsOnly = new ClientConfiguration();
    httpsOnly.setProtocol(Protocol.HTTPS);
    AmazonEC2Client client = new AmazonEC2Client(provider);
    client.setConfiguration(httpsOnly);
    return client;
}
Example 53
Project: usergrid-master  File: AmazonUtils.java View source code
/**
 * Builds an EC2 client. When both key parts are supplied the client uses
 * them directly; otherwise credential resolution is delegated to the
 * default provider chain.
 *
 * @param accessKey AWS access key id (nullable)
 * @param secretKey AWS secret key (nullable)
 * @return EC2 client configured for HTTPS transport
 */
public static AmazonEC2Client getEC2Client(String accessKey, String secretKey) {
    AWSCredentialsProvider credsProvider;
    if (accessKey == null || secretKey == null) {
        credsProvider = new DefaultAWSCredentialsProviderChain();
    } else {
        credsProvider = new StaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));
    }
    AmazonEC2Client ec2Client = new AmazonEC2Client(credsProvider);
    ClientConfiguration cfg = new ClientConfiguration();
    cfg.setProtocol(Protocol.HTTPS);
    ec2Client.setConfiguration(cfg);
    return ec2Client;
}
Example 54
Project: VTHacks-Android-master  File: RegisterWithSNSRunnable.java View source code
/**
 * Registers this device with SNS.
 *
 * Precondition: GCM ID and AWS credentials for this device are stored in shared prefs.
 * Success: Device is registered for push notifications and this is stored in shared prefs.
 */
@Override
public void run() {
    SharedPreferences sharedPreferences = context.getSharedPreferences(Constants.PREFS, Context.MODE_PRIVATE);
    // Skip everything if a previous run already completed registration.
    if (!sharedPreferences.getBoolean(Constants.PREFS_AWS_REGISTERED, false)) {
        // Load the GCM registration id and the temporary AWS session
        // credentials stored earlier (see precondition above).
        String gcmId = sharedPreferences.getString(Constants.PREFS_GCM_ID, null);
        String secretAccessKey = sharedPreferences.getString(Constants.PREFS_AWS_SECRET_ACCESS_KEY, null);
        String securityToken = sharedPreferences.getString(Constants.PREFS_AWS_SECURITY_TOKEN, null);
        String expiration = sharedPreferences.getString(Constants.PREFS_AWS_EXPIRATION, null);
        String accessKeyID = sharedPreferences.getString(Constants.PREFS_AWS_ACCESS_KEY_ID, null);
        // Bail out if any piece is missing; nothing can be registered yet.
        if (gcmId == null || secretAccessKey == null || securityToken == null || expiration == null || accessKeyID == null) {
            Log.d(TAG, "Not all credentials are available.");
            return;
        }
        // Expired session credentials: fetch fresh ones (run() is invoked
        // directly, so this is synchronous) and schedule a retry.
        if (GetAWSCredentialsRunnable.areCredentialsExpired(expiration)) {
            new GetAWSCredentialsRunnable(context, 1024).run();
            retrySNS();
            return;
        }
        AWSCredentials credentials = new BasicSessionCredentials(accessKeyID, secretAccessKey, securityToken);
        AmazonSNSClient client = new AmazonSNSClient(credentials);
        // Create the platform endpoint for this device's GCM token.
        CreatePlatformEndpointRequest createEndpointRequest = new CreatePlatformEndpointRequest();
        createEndpointRequest.setPlatformApplicationArn(Constants.AWS_PLATFORM_APPLICATION_ARN);
        createEndpointRequest.setToken(gcmId);
        CreatePlatformEndpointResult endpointResult = client.createPlatformEndpoint(createEndpointRequest);
        if (endpointResult == null) {
            Log.d(TAG, "Failed to create platform endpoint.");
            retrySNS();
            return;
        }
        // Subscribe the new endpoint to the application topic.
        String snsArn = endpointResult.getEndpointArn();
        SubscribeRequest subscribeRequest = new SubscribeRequest(Constants.AWS_TOPIC_ARN, Constants.AWS_PROTOCOL, snsArn);
        SubscribeResult subscribeResult = client.subscribe(subscribeRequest);
        if (subscribeResult == null) {
            Log.d(TAG, "Failed to subscribe to topic.");
            retrySNS();
            return;
        }
        // Record success so future runs are no-ops.
        Editor editor = sharedPreferences.edit();
        editor.putBoolean(Constants.PREFS_AWS_REGISTERED, true);
        editor.commit();
    }
}
Example 55
Project: web-crawler-master  File: AbstractS3CacheBolt.java View source code
/** Returns an S3 client given the configuration **/
public static AmazonS3Client getS3Client(Map conf) {
    // Pull credentials from the default provider chain and build the client.
    final AWSCredentialsProvider chain = new DefaultAWSCredentialsProviderChain();
    final AmazonS3Client client = new AmazonS3Client(chain.getCredentials(), new ClientConfiguration());
    // Apply an explicit region when one is configured.
    final String regionName = ConfUtils.getString(conf, REGION);
    if (StringUtils.isNotBlank(regionName)) {
        client.setRegion(RegionUtils.getRegion(regionName));
    }
    // Likewise for a custom endpoint.
    final String endpoint = ConfUtils.getString(conf, ENDPOINT);
    if (StringUtils.isNotBlank(endpoint)) {
        client.setEndpoint(endpoint);
    }
    return client;
}
Example 56
Project: ambari-master  File: S3Util.java View source code
/**
 * Creates a TransferManager, using explicit credentials when AWSUtil can
 * build them from the given keys, and the SDK's default credential lookup
 * (no-arg constructor) otherwise.
 */
public static TransferManager getTransferManager(String accessKey, String secretKey) {
    AWSCredentials awsCredentials = AWSUtil.createAWSCredentials(accessKey, secretKey);
    return awsCredentials == null ? new TransferManager() : new TransferManager(awsCredentials);
}
Example 57
Project: aws-codedeploy-plugin-master  File: AWSClients.java View source code
/**
 * Assumes the given IAM role via STS and returns session credentials, or
 * null when no role is configured.
 *
 * @param iamRole    ARN of the role to assume; empty/null skips assumption
 * @param externalId external id expected by the role's trust policy
 */
private static AWSCredentials getCredentials(String iamRole, String externalId) {
    if (isEmpty(iamRole))
        return null;
    // Cap the assumed-role session at STS's one-hour maximum.
    int credsDuration = Math.min(3600, (int) (AWSCodeDeployPublisher.DEFAULT_TIMEOUT_SECONDS * AWSCodeDeployPublisher.DEFAULT_POLLING_FREQUENCY_SECONDS));
    AWSSecurityTokenServiceClient sts = new AWSSecurityTokenServiceClient();
    Credentials stsCredentials = sts.assumeRole(new AssumeRoleRequest()
            .withRoleArn(iamRole)
            .withExternalId(externalId)
            .withDurationSeconds(credsDuration)
            .withRoleSessionName(AWSCodeDeployPublisher.ROLE_SESSION_NAME))
            .getCredentials();
    return new BasicSessionCredentials(stsCredentials.getAccessKeyId(), stsCredentials.getSecretAccessKey(), stsCredentials.getSessionToken());
}
Example 58
Project: aws-codepipeline-plugin-for-jenkins-master  File: AWSClientsTest.java View source code
@Test
public void createsCodePipelineClientUsingProxyHostAndPort() {
    // when: build AWSClients with an explicit proxy host/port
    final AWSClients awsClients = new AWSClients(Region.getRegion(Regions.US_WEST_2), mock(AWSCredentials.class), PROXY_HOST, PROXY_PORT, PLUGIN_VERSION, codePipelineClientFactory, s3ClientFactory);
    final AWSCodePipeline codePipelineClient = awsClients.getCodePipelineClient();
    // then: the factory-built client is returned as-is...
    assertEquals(expectedCodePipelineClient, codePipelineClient);
    // ...and the ClientConfiguration handed to the factory carries the proxy settings.
    final ArgumentCaptor<ClientConfiguration> clientConfigurationCaptor = ArgumentCaptor.forClass(ClientConfiguration.class);
    verify(codePipelineClientFactory).getAWSCodePipelineClient(any(AWSCredentials.class), clientConfigurationCaptor.capture());
    final ClientConfiguration clientConfiguration = clientConfigurationCaptor.getValue();
    assertEquals(PROXY_HOST, clientConfiguration.getProxyHost());
    assertEquals(PROXY_PORT, clientConfiguration.getProxyPort());
    // The client must also be pinned to the requested region.
    verify(codePipelineClient).setRegion(Region.getRegion(Regions.US_WEST_2));
}
Example 59
Project: aws-mobile-sample-wif-master  File: AmazonSharedPreferencesWrapper.java View source code
/**
 * Gets the AWS Access Key, AWS Secret Key and Security Token currently
 * stored in Shared Preferences and wraps them in a session-credentials
 * object.
 */
public static AWSCredentials getCredentialsFromSharedPreferences(SharedPreferences sharedPreferences) {
    return new BasicSessionCredentials(
            AmazonSharedPreferencesWrapper.getValueFromSharedPreferences(sharedPreferences, AWS_ACCESS_KEY),
            AmazonSharedPreferencesWrapper.getValueFromSharedPreferences(sharedPreferences, AWS_SECRET_KEY),
            AmazonSharedPreferencesWrapper.getValueFromSharedPreferences(sharedPreferences, AWS_SECURITY_TOKEN));
}
Example 60
Project: axemblr-provisionr-master  File: ProviderClientCacheSupplier.java View source code
@Override
public AmazonEC2 load(Provider provider) throws Exception {
    // Fall back to the default region when the provider options omit one.
    String region = Optional.fromNullable(provider.getOptions().get(ProviderOptions.REGION)).or(ProviderOptions.DEFAULT_REGION);
    AWSCredentials credentials = new BasicAWSCredentials(provider.getAccessKey(), provider.getSecretKey());
    AmazonEC2 client = new AmazonEC2Client(credentials, new ClientConfiguration().withUserAgent(AXEMBLR_USER_AGENT));
    // An explicitly configured endpoint wins outright.
    if (provider.getEndpoint().isPresent()) {
        String endpoint = provider.getEndpoint().get();
        LOG.info("Using endpoint {} as configured", endpoint);
        client.setEndpoint(endpoint);
        return client;
    }
    // Otherwise ask EC2 which endpoint serves the requested region.
    LOG.info(">> Searching endpoint for region {}", region);
    DescribeRegionsRequest request = new DescribeRegionsRequest().withRegionNames(region);
    DescribeRegionsResult result = client.describeRegions(request);
    checkArgument(result.getRegions().size() == 1, "Invalid region name %s. Expected one result found %s", region, result.getRegions());
    LOG.info("<< Using endpoint {} for region {}", result.getRegions().get(0).getEndpoint(), region);
    client.setEndpoint(result.getRegions().get(0).getEndpoint());
    return client;
}
Example 61
Project: cloudExplorer-master  File: BucketMigrationCLI.java View source code
/**
 * Lists the caller's bucket names as a single space-separated string
 * (with a leading space), or "no_bucket_found" when none are visible.
 * Errors during listing are reported to the GUI/console and the partial
 * result (if any) is returned.
 */
String listBuckets(String access_key, String secret_key, String endpoint) {
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials, new ClientConfiguration());
    if (endpoint.contains("amazonaws.com")) {
        // For AWS proper, resolve the bucket's home region and pick the
        // matching regional endpoint.
        String aws_endpoint = s3Client.getBucketLocation(new GetBucketLocationRequest(bucket));
        if (aws_endpoint.contains("US")) {
            s3Client.setEndpoint("https://s3.amazonaws.com");
        } else if (aws_endpoint.contains("us-west")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else if (aws_endpoint.contains("eu-west")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else if (aws_endpoint.contains("ap-")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else if (aws_endpoint.contains("sa-east-1")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else {
            s3Client.setEndpoint("https://s3." + aws_endpoint + ".amazonaws.com");
        }
    } else {
        // Non-AWS (S3-compatible) endpoints need path-style access.
        s3Client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
        s3Client.setEndpoint(endpoint);
    }
    // FIX: previously the list was seeded with null and patched afterwards
    // with replace("null", ""), which also mangled any bucket name that
    // contained the substring "null". Build the string cleanly instead.
    // (Also removed an unused String[] local.)
    StringBuilder bucketlist = new StringBuilder();
    try {
        for (Bucket bucket : s3Client.listBuckets()) {
            bucketlist.append(' ').append(bucket.getName());
        }
    } catch (Exception listBucket) {
        if (NewJFrame.gui) {
            NewJFrame.jTextArea1.append("\n\nAn error has occurred in listBucket.");
            NewJFrame.jTextArea1.append("\n\nError Message:    " + listBucket.getMessage());
        } else {
            System.out.print("\n\nAn error has occurred in listBucket.");
            System.out.print("\n\nError Message:    " + listBucket.getMessage());
        }
    }
    return bucketlist.length() == 0 ? "no_bucket_found" : bucketlist.toString();
}
Example 62
Project: CloudTrailViewer-master  File: AwsService.java View source code
/**
 * Builds an S3 client from the account's stored key pair, or returns null
 * when either key is missing or implausibly short.
 */
private AmazonS3 getS3ClientUsingKeys(AwsAccount activeAccount) {
    final String key = activeAccount.getKey();
    final String secret = activeAccount.getSecret();
    // Both values must be present and longer than 10 chars after trimming.
    boolean keyLooksValid = key != null && key.trim().length() > 10;
    boolean secretLooksValid = secret != null && secret.trim().length() > 10;
    if (!keyLooksValid || !secretLooksValid) {
        return null;
    }
    return new AmazonS3Client(new BasicAWSCredentials(key, secret));
}
Example 63
Project: dynamodb-geo-master  File: GeoDynamoDBServlet.java View source code
/**
 * Initializes the geo data manager; credentials and deployment parameters
 * (table name, region) are injected via JVM system properties.
 */
private void setupGeoDataManager() {
    AWSCredentials credentials = new BasicAWSCredentials(
            System.getProperty("AWS_ACCESS_KEY_ID"), System.getProperty("AWS_SECRET_KEY"));
    String tableName = System.getProperty("PARAM1");
    String regionName = System.getProperty("PARAM2");
    AmazonDynamoDBClient ddb = new AmazonDynamoDBClient(credentials);
    ddb.setRegion(Region.getRegion(Regions.fromName(regionName)));
    config = new GeoDataManagerConfiguration(ddb, tableName);
    geoDataManager = new GeoDataManager(config);
}
Example 64
Project: eucalyptus-master  File: IoInternalHmacHandler.java View source code
/**
 * Returns credentials, refreshing the cached pair when it is missing,
 * expired, or approaching expiry. Refreshes are serialized on
 * credentialsRefreshLock; the pre-expiry path refreshes opportunistically
 * without blocking other callers.
 */
private AWSCredentials credentials() {
    final Pair<Long, AWSCredentials> credentialsPair = credentialsRef.get();
    if (credentialsPair == null || credentialsPair.getLeft() < expiry(EXPIRY_OFFSET)) {
        // no credentials or they have expired, must wait for new
        try (final LockResource lock = LockResource.lock(credentialsRefreshLock)) {
            return perhapsRefreshCredentials();
        }
    } else if (credentialsPair.getLeft() < expiry(PRE_EXPIRY)) {
        // credentials pre-expired, refresh if no one else is doing so
        try (final LockResource lock = LockResource.tryLock(credentialsRefreshLock)) {
            if (lock.isLocked()) {
                return perhapsRefreshCredentials();
            } else {
                // another thread holds the lock and is refreshing; the
                // current credentials are still valid, so hand them back
                return credentialsPair.getRight();
            }
        }
    } else {
        // cached credentials are comfortably within their lifetime
        return credentialsPair.getRight();
    }
}
Example 65
Project: flink-master  File: AWSUtil.java View source code
/**
 * Return a {@link AWSCredentialsProvider} instance corresponding to the configuration properties.
 *
 * The provider type is taken from AWS_CREDENTIALS_PROVIDER when present;
 * otherwise it is inferred: BASIC when both key properties are set, AUTO
 * (default chain) otherwise.
 *
 * @param configProps the configuration properties
 * @return The corresponding AWS Credentials Provider instance
 */
public static AWSCredentialsProvider getCredentialsProvider(final Properties configProps) {
    CredentialProvider credentialProviderType;
    if (!configProps.containsKey(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER)) {
        if (configProps.containsKey(AWSConfigConstants.AWS_ACCESS_KEY_ID) && configProps.containsKey(AWSConfigConstants.AWS_SECRET_ACCESS_KEY)) {
            // if the credential provider type is not specified, but the Access Key ID and Secret Key are given, it will default to BASIC
            credentialProviderType = CredentialProvider.BASIC;
        } else {
            // if the credential provider type is not specified, it will default to AUTO
            credentialProviderType = CredentialProvider.AUTO;
        }
    } else {
        // NOTE: valueOf throws IllegalArgumentException for unknown names.
        credentialProviderType = CredentialProvider.valueOf(configProps.getProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER));
    }
    AWSCredentialsProvider credentialsProvider;
    switch(credentialProviderType) {
        case ENV_VAR:
            credentialsProvider = new EnvironmentVariableCredentialsProvider();
            break;
        case SYS_PROP:
            credentialsProvider = new SystemPropertiesCredentialsProvider();
            break;
        case PROFILE:
            // Optional custom profile name and/or config file location.
            String profileName = configProps.getProperty(AWSConfigConstants.AWS_PROFILE_NAME, null);
            String profileConfigPath = configProps.getProperty(AWSConfigConstants.AWS_PROFILE_PATH, null);
            credentialsProvider = (profileConfigPath == null) ? new ProfileCredentialsProvider(profileName) : new ProfileCredentialsProvider(profileConfigPath, profileName);
            break;
        case BASIC:
            // Static credentials read from the properties on every getCredentials() call.
            credentialsProvider = new AWSCredentialsProvider() {

                @Override
                public AWSCredentials getCredentials() {
                    return new BasicAWSCredentials(configProps.getProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID), configProps.getProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY));
                }

                @Override
                public void refresh() {
                // do nothing
                }
            };
            break;
        // Deliberate shared branch: AUTO uses the SDK's default chain, and
        // default: is unreachable in practice (valueOf covers all names).
        default:
        case AUTO:
            credentialsProvider = new DefaultAWSCredentialsProviderChain();
    }
    return credentialsProvider;
}
Example 66
Project: Hadoop-DynamoDB-master  File: DynamoDBConfiguration.java View source code
/**
 * Builds a DynamoDB client from the Hadoop configuration's static
 * credentials, optionally pointing it at a non-default endpoint.
 */
public AmazonDynamoDBClient getAmazonDynamoDBClient() {
    AWSCredentials credentials =
            new BasicAWSCredentials(conf.get(ACCESS_KEY_PROPERTY), conf.get(SECRET_KEY_PROPERTY));
    AmazonDynamoDBClient client = new AmazonDynamoDBClient(credentials);
    // Endpoint override, e.g. for DynamoDB Local or a regional endpoint.
    String endpoint = conf.get(DYNAMODB_ENDPOINT);
    if (endpoint != null) {
        client.setEndpoint(endpoint);
    }
    return client;
}
Example 67
Project: incubator-provisionr-master  File: ProviderClientCacheSupplier.java View source code
@Override
public AmazonEC2 load(Provider provider) {
    // Fall back to the default region when the provider options omit one.
    final String regionName = Optional.fromNullable(provider.getOptions().get(ProviderOptions.REGION)).or(ProviderOptions.DEFAULT_REGION);
    final AmazonEC2 ec2 = new AmazonEC2Client(
            new BasicAWSCredentials(provider.getAccessKey(), provider.getSecretKey()),
            new ClientConfiguration().withUserAgent(PROVISIONR_USER_AGENT));
    if (provider.getEndpoint().isPresent()) {
        LOG.info("Using endpoint {} as configured", provider.getEndpoint().get());
        ec2.setEndpoint(provider.getEndpoint().get());
    } else {
        // No explicit endpoint: ask EC2 which endpoint serves the region.
        LOG.info(">> Searching endpoint for region {}", regionName);
        DescribeRegionsResult result = ec2.describeRegions(new DescribeRegionsRequest().withRegionNames(regionName));
        checkArgument(result.getRegions().size() == 1, "Invalid region name %s. Expected one result found %s", regionName, result.getRegions());
        LOG.info("<< Using endpoint {} for region {}", result.getRegions().get(0).getEndpoint(), regionName);
        ec2.setEndpoint(result.getRegions().get(0).getEndpoint());
    }
    return ec2;
}
Example 68
Project: incubator-streams-master  File: S3PersistReader.java View source code
/**
 * Opens the S3 connection and resolves the set of keys to read.
 *
 * Handles both a "directory" reader path (common prefixes or object keys
 * beneath it are collected, following pagination) and a single-file path.
 */
@Override
public void prepare(Object configurationObject) {
    lineReaderUtil = LineReadWriteUtil.getInstance(s3ReaderConfiguration);
    // Connect to S3
    synchronized (this) {
        // Create the credentials Object
        AWSCredentials credentials = new BasicAWSCredentials(s3ReaderConfiguration.getKey(), s3ReaderConfiguration.getSecretKey());
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol(Protocol.valueOf(s3ReaderConfiguration.getProtocol().toString()));
        // We do not want path style access
        S3ClientOptions clientOptions = new S3ClientOptions();
        clientOptions.setPathStyleAccess(false);
        this.amazonS3Client = new AmazonS3Client(credentials, clientConfig);
        if (StringUtils.isNotEmpty(s3ReaderConfiguration.getRegion())) {
            this.amazonS3Client.setRegion(Region.getRegion(Regions.fromName(s3ReaderConfiguration.getRegion())));
        }
        this.amazonS3Client.setS3ClientOptions(clientOptions);
    }
    // First page of keys under the reader path (at most 500 per request).
    final ListObjectsRequest request = new ListObjectsRequest().withBucketName(this.s3ReaderConfiguration.getBucket()).withPrefix(s3ReaderConfiguration.getReaderPath()).withMaxKeys(500);
    ObjectListing listing = this.amazonS3Client.listObjects(request);
    this.files = new ArrayList<>();
    /*
     * If you can list files that are in this path, then you must be dealing with a directory
     * if you cannot list files that are in this path, then you are most likely dealing with
     * a simple file.
     */
    boolean hasCommonPrefixes = listing.getCommonPrefixes().size() > 0;
    boolean hasObjectSummaries = listing.getObjectSummaries().size() > 0;
    if (hasCommonPrefixes || hasObjectSummaries) {
        // Handle the 'directory' use case
        do {
            // NOTE(review): hasCommonPrefixes is computed once from the first
            // page only, so every subsequent page takes the same branch.
            if (hasCommonPrefixes) {
                for (String file : listing.getCommonPrefixes()) {
                    this.files.add(file);
                }
            } else {
                for (final S3ObjectSummary objectSummary : listing.getObjectSummaries()) {
                    this.files.add(objectSummary.getKey());
                }
            }
            // get the next batch.
            listing = this.amazonS3Client.listNextBatchOfObjects(listing);
        } while (listing.isTruncated());
    } else {
        // handle the single file use-case
        this.files.add(s3ReaderConfiguration.getReaderPath());
    }
    if (this.files.size() <= 0) {
        LOGGER.error("There are no files to read");
    }
    this.persistQueue = Queues.synchronizedQueue(new LinkedBlockingQueue<StreamsDatum>(10000));
    this.executor = Executors.newSingleThreadExecutor();
}
Example 69
Project: jenkins-aws-plugin-master  File: AWSClients.java View source code
/**
 * Assumes the configured IAM role via STS and returns the resulting
 * session credentials; returns null when no role is supplied.
 */
private static AWSCredentials getCredentials(String iamRole, String externalId) {
    // No role configured -> caller falls back to other credential sources.
    if (isEmpty(iamRole)) {
        return null;
    }
    int sessionSeconds = (int) (AWSCodeDeployPublisher.DEFAULT_TIMEOUT_SECONDS * AWSCodeDeployPublisher.DEFAULT_POLLING_FREQUENCY_SECONDS);
    if (sessionSeconds > 3600) {
        // STS caps the assumed-role session duration at one hour here.
        sessionSeconds = 3600;
    }
    AssumeRoleRequest request = new AssumeRoleRequest()
            .withRoleArn(iamRole)
            .withExternalId(externalId)
            .withDurationSeconds(sessionSeconds)
            .withRoleSessionName(AWSCodeDeployPublisher.ROLE_SESSION_NAME);
    Credentials stsCredentials = new AWSSecurityTokenServiceClient().assumeRole(request).getCredentials();
    return new BasicSessionCredentials(stsCredentials.getAccessKeyId(), stsCredentials.getSecretAccessKey(), stsCredentials.getSessionToken());
}
Example 70
Project: jooby-master  File: ConfigCredentialsProviderTest.java View source code
@Test
public void serviceWithDefaultConfig() throws Exception {
    // Scenario: no service-scoped (aws.s3.*) keys and no session token are
    // configured, so the provider must fall back to the global
    // aws.accessKey / aws.secretKey settings.
    String accessKey = "accessKey";
    String secretKey = "secretKey";
    new MockUnit(Config.class).expect( unit -> {
        Config config = unit.get(Config.class);
        // Service-scoped lookups miss...
        expect(config.hasPath("aws.s3.accessKey")).andReturn(false);
        expect(config.hasPath("aws.s3.secretKey")).andReturn(false);
        expect(config.hasPath("aws.s3.sessionToken")).andReturn(false);
        expect(config.hasPath("aws.sessionToken")).andReturn(false);
        // ...so the global keys are read instead.
        expect(config.getString("aws.accessKey")).andReturn(accessKey);
        expect(config.getString("aws.secretKey")).andReturn(secretKey);
    }).run( unit -> {
        AWSCredentials creds = new ConfigCredentialsProvider(unit.get(Config.class)).service("s3").getCredentials();
        assertEquals("accessKey", creds.getAWSAccessKeyId());
        assertEquals("secretKey", creds.getAWSSecretKey());
    });
}
Example 71
Project: kaif-master  File: AwsSesMailAgent.java View source code
@PostConstruct
public void afterPropertiesSet() {
    // Static SES credentials taken directly from the configured mail properties.
    AWSCredentials awsSesCredentials =
            new BasicAWSCredentials(mailProperties.getAwsAccessKey(), mailProperties.getAwsSecretKey());
    AWSStaticCredentialsProvider credentialsProvider = new AWSStaticCredentialsProvider(awsSesCredentials);
    // SES client is pinned to us-east-1.
    this.client = AmazonSimpleEmailServiceClientBuilder.standard()
            .withRegion(Regions.US_EAST_1)
            .withCredentials(credentialsProvider)
            .build();
    logger.info("mail agent ready, sender:" + mailProperties.getAwsSenderAddress() + ", access key:" + awsSesCredentials.getAWSAccessKeyId());
}
Example 72
Project: medusa-glacier-master  File: CloudFormationSample.java View source code
public static void main(String[] args) throws Exception {
    /*
         * The ProfileCredentialsProvider will return your [default]
         * credential profile by reading from the credentials file located at
         * (~/.aws/credentials).
         */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e);
    }
    // CloudFormation client pinned to us-west-2; every stack operation below runs there.
    AmazonCloudFormation stackbuilder = new AmazonCloudFormationClient(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    stackbuilder.setRegion(usWest2);
    System.out.println("===========================================");
    System.out.println("Getting Started with AWS CloudFormation");
    System.out.println("===========================================\n");
    String stackName = "CloudFormationSampleStack";
    String logicalResourceName = "SampleNotificationTopic";
    try {
        // Create a stack from the template bundled next to this class on the classpath
        CreateStackRequest createRequest = new CreateStackRequest();
        createRequest.setStackName(stackName);
        createRequest.setTemplateBody(convertStreamToString(CloudFormationSample.class.getResourceAsStream("CloudFormationSample.template")));
        System.out.println("Creating a stack called " + createRequest.getStackName() + ".");
        stackbuilder.createStack(createRequest);
        // Wait (poll) for stack to be created
        // Note that you could use SNS notifications on the CreateStack call to track the progress of the stack creation
        System.out.println("Stack creation completed, the stack " + stackName + " completed with " + waitForCompletion(stackbuilder, stackName));
        // Show all the stacks for this account along with the resources for each stack
        for (Stack stack : stackbuilder.describeStacks(new DescribeStacksRequest()).getStacks()) {
            System.out.println("Stack : " + stack.getStackName() + " [" + stack.getStackStatus().toString() + "]");
            DescribeStackResourcesRequest stackResourceRequest = new DescribeStackResourcesRequest();
            stackResourceRequest.setStackName(stack.getStackName());
            for (StackResource resource : stackbuilder.describeStackResources(stackResourceRequest).getStackResources()) {
                System.out.format("    %1$-40s %2$-25s %3$s\n", resource.getResourceType(), resource.getLogicalResourceId(), resource.getPhysicalResourceId());
            }
        }
        // Lookup a single resource by its logical name within the sample stack
        DescribeStackResourcesRequest logicalNameResourceRequest = new DescribeStackResourcesRequest();
        logicalNameResourceRequest.setStackName(stackName);
        logicalNameResourceRequest.setLogicalResourceId(logicalResourceName);
        System.out.format("Looking up resource name %1$s from stack %2$s\n", logicalNameResourceRequest.getLogicalResourceId(), logicalNameResourceRequest.getStackName());
        for (StackResource resource : stackbuilder.describeStackResources(logicalNameResourceRequest).getStackResources()) {
            System.out.format("    %1$-40s %2$-25s %3$s\n", resource.getResourceType(), resource.getLogicalResourceId(), resource.getPhysicalResourceId());
        }
        // Delete the stack created above to clean up
        DeleteStackRequest deleteRequest = new DeleteStackRequest();
        deleteRequest.setStackName(stackName);
        System.out.println("Deleting the stack called " + deleteRequest.getStackName() + ".");
        stackbuilder.deleteStack(deleteRequest);
        // Wait (poll) for stack to be deleted
        // Note that you could have used SNS notifications on the original CreateStack call to track the progress of the stack deletion
        System.out.println("Stack creation completed, the stack " + stackName + " completed with " + waitForCompletion(stackbuilder, stackName));
    } catch (AmazonServiceException ase) {
        // Service-side rejection: dump the full error context for the reader.
        System.out.println("Caught an AmazonServiceException, which means your request made it " + "to AWS CloudFormation, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // Client-side failure (e.g. no network): nothing reached the service.
        System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with AWS CloudFormation, " + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
Example 73
Project: openhab1-addons-master  File: DynamoDBConfig.java View source code
/**
 * Builds a {@link DynamoDBConfig} from the persistence service configuration map.
 *
 * @param config persistence service configuration
 * @return DynamoDB configuration. Returns null in case of configuration errors
 */
public static DynamoDBConfig fromConfig(Map<String, Object> config) {
    if (config == null || config.isEmpty()) {
        logger.error("Configuration not provided! At least AWS region and credentials must be provided.");
        return null;
    }
    try {
        // --- AWS region (mandatory) ---
        String regionName = (String) config.get("region");
        if (isBlank(regionName)) {
            invalidRegionLogHelp(regionName);
            return null;
        }
        final Region region;
        try {
            region = Region.getRegion(Regions.fromName(regionName));
        } catch (IllegalArgumentException e) {
            invalidRegionLogHelp(regionName);
            return null;
        }
        // --- Credentials: explicit key pair wins over a profiles file ---
        String accessKey = (String) config.get("accessKey");
        String secretKey = (String) config.get("secretKey");
        final AWSCredentials awsCredentials;
        if (!isBlank(accessKey) && !isBlank(secretKey)) {
            logger.debug("accessKey and secretKey specified. Using those.");
            awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        } else {
            logger.debug("accessKey and/or secretKey blank. Checking profilesConfigFile and profile.");
            String profilesConfigFile = (String) config.get("profilesConfigFile");
            String profile = (String) config.get("profile");
            if (isBlank(profilesConfigFile) || isBlank(profile)) {
                logger.error("Specify either 1) accessKey and secretKey; or 2) profilesConfigFile and " + "profile for providing AWS credentials");
                return null;
            }
            awsCredentials = new ProfilesConfigFile(profilesConfigFile).getCredentials(profile);
        }
        // --- Table name prefix (optional, defaulted) ---
        String tablePrefix = (String) config.get("tablePrefix");
        if (isBlank(tablePrefix)) {
            logger.debug("Using default table name {}", DEFAULT_TABLE_PREFIX);
            tablePrefix = DEFAULT_TABLE_PREFIX;
        }
        // --- On-demand table creation flag (optional, defaulted) ---
        String createTableParam = (String) config.get("createTable");
        final boolean createTable;
        if (!isBlank(createTableParam)) {
            createTable = Boolean.parseBoolean(createTableParam);
        } else {
            logger.debug("Creating table on demand: {}", DEFAULT_CREATE_TABLE_ON_DEMAND);
            createTable = DEFAULT_CREATE_TABLE_ON_DEMAND;
        }
        // --- Provisioned throughput (optional, defaulted) ---
        String readCapacityParam = (String) config.get("readCapacityUnits");
        final long readCapacityUnits;
        if (!isBlank(readCapacityParam)) {
            readCapacityUnits = Long.parseLong(readCapacityParam);
        } else {
            logger.debug("Read capacity units: {}", DEFAULT_READ_CAPACITY_UNITS);
            readCapacityUnits = DEFAULT_READ_CAPACITY_UNITS;
        }
        String writeCapacityParam = (String) config.get("writeCapacityUnits");
        final long writeCapacityUnits;
        if (!isBlank(writeCapacityParam)) {
            writeCapacityUnits = Long.parseLong(writeCapacityParam);
        } else {
            logger.debug("Write capacity units: {}", DEFAULT_WRITE_CAPACITY_UNITS);
            writeCapacityUnits = DEFAULT_WRITE_CAPACITY_UNITS;
        }
        return new DynamoDBConfig(region, awsCredentials, tablePrefix, createTable, readCapacityUnits, writeCapacityUnits);
    } catch (Exception e) {
        // Any parse/IO error is treated as a configuration error: log and return null.
        logger.error("Error with configuration", e);
        return null;
    }
}
Example 74
Project: phresco-master  File: AWSAutoScaleHelper.java View source code
public void setupAutoScaling() {
    // TODO validate that all necessary member variables have been set
    // Clamp desired capacity into range.
    // NOTE(review): the <= / >= here also resets desiredSize when it EQUALS minSize
    // or maxSize; confirm that equality at the boundaries is meant to be rejected.
    if (desiredSize <= minSize || desiredSize >= maxSize)
        desiredSize = minSize;
    AWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);
    try {
        AmazonAutoScaling autoScaling = new AmazonAutoScalingClient(creds);
        //Launch configuration: instance template (AMI, type, keys, security group, user data)
        String launchConfigName = "Phresco-LC-" + autoScaleName;
        CreateLaunchConfigurationRequest launchConfigRequest = new CreateLaunchConfigurationRequest().withLaunchConfigurationName(launchConfigName).withInstanceType(instanceType).withImageId(imageId).withKeyName(keyPair).withSecurityGroups(securityGroup).withUserData(userData);
        autoScaling.createLaunchConfiguration(launchConfigRequest);
        //Auto scaling group referencing the launch configuration above
        String autoScalingGroupName = "Phresco-ASG-" + autoScaleName;
        CreateAutoScalingGroupRequest autoScalingGroupRequest = new CreateAutoScalingGroupRequest().withAutoScalingGroupName(autoScalingGroupName).withAvailabilityZones(availabilityZones).withLaunchConfigurationName(launchConfigName).withMinSize(minSize).withMaxSize(maxSize).withDesiredCapacity(desiredSize);
        // Optionally attach the group to a load balancer when one is configured.
        if (!isBlank(loadBalancer)) {
            ArrayList<String> lbs = new ArrayList<String>();
            lbs.add(loadBalancer);
            autoScalingGroupRequest.setLoadBalancerNames(lbs);
        }
        autoScaling.createAutoScalingGroup(autoScalingGroupRequest);
        //Scale up policy
        String scaleUpPolicyName = "Phresco-Pol-Up-" + autoScaleName;
        PutScalingPolicyRequest scaleUpPolicyRequest = new PutScalingPolicyRequest().withPolicyName(scaleUpPolicyName).withAutoScalingGroupName(autoScalingGroupName).withScalingAdjustment(scaleUpAdjustment).withAdjustmentType(scaleUpAdjustmentType).withCooldown(scaleUpCooldown);
        PutScalingPolicyResult scaleUpPolicyResult = autoScaling.putScalingPolicy(scaleUpPolicyRequest);
        AmazonCloudWatch cloudWatch = new AmazonCloudWatchClient(creds);
        //Scale up alarm: fires the scale-up policy ARN when the metric crosses the threshold
        String upAlarmName = "Phresco-Pol-UpAlarm-" + autoScaleName;
        PutMetricAlarmRequest upAlarmRequest = new PutMetricAlarmRequest().withAlarmName(upAlarmName).withEvaluationPeriods(scaleUpEvalPeriods).withPeriod(scaleUpPeriod).withComparisonOperator(scaleUpComparison).withMetricName(scaleUpMetric).withNamespace(scaleUpNamespace).withStatistic(scaleUpStatistic).withThreshold(scaleUpThreshold).withUnit(scaleUpUnit).withAlarmActions(scaleUpPolicyResult.getPolicyARN());
        cloudWatch.putMetricAlarm(upAlarmRequest);
        //Scale down policy
        String scaleDownPolicyName = "Phresco-Pol-Dn-" + autoScaleName;
        PutScalingPolicyRequest scaleDownPolicyRequest = new PutScalingPolicyRequest().withPolicyName(scaleDownPolicyName).withAutoScalingGroupName(autoScalingGroupName).withScalingAdjustment(scaleDownAdjustment).withAdjustmentType(scaleDownAdjustmentType).withCooldown(scaleDownCooldown);
        PutScalingPolicyResult scaleDownPolicyResult = autoScaling.putScalingPolicy(scaleDownPolicyRequest);
        //Scale down alarm: fires the scale-down policy ARN when its metric crosses the threshold
        String downAlarmName = "Phresco-Pol-DnAlarm-" + autoScaleName;
        PutMetricAlarmRequest downAlarmRequest = new PutMetricAlarmRequest().withAlarmName(downAlarmName).withEvaluationPeriods(scaleDownEvalPeriods).withPeriod(scaleDownPeriod).withComparisonOperator(scaleDownComparison).withMetricName(scaleDownMetric).withNamespace(scaleDownNamespace).withStatistic(scaleDownStatistic).withThreshold(scaleDownThreshold).withUnit(scaleDownUnit).withAlarmActions(scaleDownPolicyResult.getPolicyARN());
        cloudWatch.putMetricAlarm(downAlarmRequest);
    // NOTE(review): all exceptions below are only printed and then swallowed, so
    // callers cannot detect a failed (or partially applied) setup — consider
    // rethrowing or returning a status instead of printStackTrace().
    } catch (AmazonServiceException ase) {
        ase.printStackTrace();
    } catch (AmazonClientException ace) {
        ace.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 75
Project: seqware-master  File: S3UploadDirectory.java View source code
@Override
public ReturnValue do_run() {
    // Start optimistic: downgrade to FAILURE on any problem below.
    ReturnValue result = new ReturnValue();
    result.setExitStatus(ReturnValue.SUCCESS);
    String s3AccessKey = null;
    String s3SecretKey = null;
    try {
        // Pull the AWS key pair out of the SeqWare settings file.
        HashMap<String, String> settings = (HashMap<String, String>) ConfigTools.getSettings();
        s3AccessKey = settings.get(SqwKeys.AWS_ACCESS_KEY.getSettingKey());
        s3SecretKey = settings.get(SqwKeys.AWS_SECRET_KEY.getSettingKey());
    } catch (Exception e) {
        Log.error(e.getMessage());
        result.setExitStatus(ReturnValue.FAILURE);
        return result;
    }
    if (s3AccessKey == null || s3SecretKey == null) {
        Log.error("Couldn't find access or secret key for S3 output so will exit!");
        result.setExitStatus(ReturnValue.FAILURE);
        return result;
    }
    // Upload the whole input directory to the requested bucket/prefix.
    AWSCredentials awsCreds = new BasicAWSCredentials(s3AccessKey, s3SecretKey);
    TransferManager transferManager = new TransferManager(awsCreds);
    return recursivelyUploadDir(options.valueOf("input-dir").toString(), options.valueOf("output-bucket").toString(), options.valueOf("output-prefix").toString(), transferManager);
}
Example 76
Project: servo-master  File: AwsInjectableTag.java View source code
static String getAutoScaleGroup() {
    try {
        // Prefer an explicitly configured credentials file; otherwise fall back
        // to the SDK's default provider chain.
        String credFile = System.getProperties().getProperty(AwsPropertyKeys.AWS_CREDENTIALS_FILE.getBundle());
        AWSCredentials credentials = credFile != null
                ? new PropertiesCredentials(new File(credFile))
                : new DefaultAWSCredentialsProviderChain().getCredentials();
        AmazonAutoScaling autoScalingClient = AwsServiceClients.autoScaling(credentials);
        List<AutoScalingInstanceDetails> details = autoScalingClient.describeAutoScalingInstances(new DescribeAutoScalingInstancesRequest().withInstanceIds(getInstanceId())).getAutoScalingInstances();
        if (details.isEmpty()) {
            return UNDEFINED;
        }
        return details.get(0).getAutoScalingGroupName();
    } catch (Exception e) {
        // Best-effort lookup: log and report UNDEFINED rather than propagate.
        getLogger().error("Unable to get ASG name.", e);
        return UNDEFINED;
    }
}
Example 77
Project: sfs-master  File: AwsKms.java View source code
// Initializes the AWS KMS client from config/env settings; runs at most once
// (guarded by the `started` flag) and performs the client construction on the
// background pool so the event loop is not blocked.
public Observable<Void> start(VertxContext<Server> vertxContext, JsonObject config) {
    AwsKms _this = this;
    SfsVertx sfsVertx = vertxContext.vertx();
    Context context = sfsVertx.getOrCreateContext();
    // filter: only the first caller flips started false->true and proceeds;
    // later calls complete without emitting (singleOrDefault yields null).
    return Defer.aVoid().filter( aVoid -> started.compareAndSet(false, true)).flatMap( aVoid -> {
        // Each setting may come from the JSON config or an environment variable;
        // all four are mandatory.
        String keyStoreAwsKmsEndpoint = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.endpoint");
        Preconditions.checkArgument(keyStoreAwsKmsEndpoint != null, "keystore.aws.kms.endpoint is required");
        _this.keyId = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.key_id");
        Preconditions.checkArgument(_this.keyId != null, "keystore.aws.kms.key_id is required");
        _this.accessKeyId = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.access_key_id");
        Preconditions.checkArgument(_this.accessKeyId != null, "keystore.aws.kms.access_key_id is required");
        _this.secretKey = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.secret_key");
        Preconditions.checkArgument(_this.secretKey != null, "keystore.aws.kms.secret_key is required");
        // Build the KMS client off the event loop; the anonymous credentials
        // object reads the fields lazily on every SDK call.
        return RxHelper.executeBlocking(context, sfsVertx.getBackgroundPool(), () -> {
            kms = new AWSKMSClient(new AWSCredentials() {

                @Override
                public String getAWSAccessKeyId() {
                    return _this.accessKeyId;
                }

                @Override
                public String getAWSSecretKey() {
                    return _this.secretKey;
                }
            });
            kms.setEndpoint(keyStoreAwsKmsEndpoint);
            return (Void) null;
        });
    }).singleOrDefault(null);
}
Example 78
Project: spring-cloud-aws-master  File: ContextCredentialsBeanDefinitionParserTest.java View source code
@Test
public void testWithPlaceHolder() throws Exception {
    // Boot the XML context whose property placeholders should resolve to foo/bar.
    ApplicationContext context = new ClassPathXmlApplicationContext(getClass().getSimpleName() + "-testWithPlaceHolder.xml", getClass());
    AWSCredentialsProvider provider = context.getBean(AWSCredentialsProvider.class);
    AWSCredentials resolved = provider.getCredentials();
    assertEquals("foo", resolved.getAWSAccessKeyId());
    assertEquals("bar", resolved.getAWSSecretKey());
}
Example 79
Project: spring-data-simpledb-master  File: SimpleDb.java View source code
// Wires up the SimpleDB client and the prefixed domain once the bean's
// properties have been injected.
@Override
public final void afterPropertiesSet() {
    // Anonymous credentials object: reads the accessID/secretKey fields lazily
    // on each call rather than capturing them now (unlike BasicAWSCredentials).
    final AWSCredentials awsCredentials = new AWSCredentials() {

        @Override
        public String getAWSAccessKeyId() {
            return accessID;
        }

        @Override
        public String getAWSSecretKey() {
            return secretKey;
        }
    };
    this.simpleDbClient = new AmazonSimpleDBClient(awsCredentials);
    // Domain names are namespaced by the configured prefix.
    simpleDbDomain = new SimpleDbDomain(domainPrefix);
}
Example 80
Project: ajah-master  File: AmazonSESTransport.java View source code
/**
	 * Sends a message through Amazon's SES service.
	 * 
	 * @param message
	 *            The message to send. Subject, from and to are required.
	 * @throws AddressException
	 *             If there is a problem with one of the email addresses.
	 * @throws MessagingException
	 *             If there is a problem with the transport of the message.
	 */
@Override
public void send(final EmailMessage message) throws AddressException, MessagingException {
    // Required fields: subject, from, and at least one recipient.
    AjahUtils.requireParam(message, "message");
    AjahUtils.requireParam(message.getSubject(), "message.subject");
    AjahUtils.requireParam(message.getFrom(), "message.from");
    AjahUtils.requireParam(message.getRecipients(), "message.recipients");
    final AWSCredentials credentials = new BasicAWSCredentials(Config.i.get("aws.accessKey", null), Config.i.get("aws.secretKey", null));
    if (Config.i.getBoolean("aws.ses.verify", false)) {
        // Verification is active so we'll need to check that first:
        // every sender and recipient must appear in SES's verified list.
        final AmazonSimpleEmailService email = new AmazonSimpleEmailServiceClient(credentials);
        final ListVerifiedEmailAddressesResult verifiedEmails = email.listVerifiedEmailAddresses();
        boolean verified = true;
        if (!isVerified(message.getFrom(), email, verifiedEmails)) {
            log.warning("Sender " + message.getFrom() + " is not verified");
            verified = false;
        }
        for (final EmailRecipient emailRecipient : message.getRecipients()) {
            if (!isVerified(emailRecipient.getAddress(), email, verifiedEmails)) {
                log.warning("Recipient " + emailRecipient.getAddress() + " is not verified");
                verified = false;
            }
        }
        // All addresses are checked (and logged) before failing, so the caller
        // sees every unverified address at once.
        if (!verified) {
            throw new MessagingException("Message not sent because one or more addresses need to be verified");
        }
    }
    // JavaMail session routed through the AWS transport, authenticated with
    // the same key pair used above.
    final Properties props = new Properties();
    props.setProperty("mail.transport.protocol", "aws");
    props.setProperty("mail.aws.user", credentials.getAWSAccessKeyId());
    props.setProperty("mail.aws.password", credentials.getAWSSecretKey());
    final Session session = Session.getInstance(props);
    final MimeMessage mimeMessage = new MimeMessage(session);
    mimeMessage.setFrom(new InternetAddress(message.getFrom().toString()));
    // Map each recipient onto the matching JavaMail recipient type
    // (unknown types fall back to TO).
    for (final EmailRecipient emailRecipient : message.getRecipients()) {
        switch(emailRecipient.getType()) {
            case BCC:
                mimeMessage.addRecipient(Message.RecipientType.BCC, new InternetAddress(emailRecipient.toString()));
                break;
            case CC:
                mimeMessage.addRecipient(Message.RecipientType.CC, new InternetAddress(emailRecipient.toString()));
                break;
            case TO:
                mimeMessage.addRecipient(Message.RecipientType.TO, new InternetAddress(emailRecipient.toString()));
                break;
            default:
                mimeMessage.addRecipient(Message.RecipientType.TO, new InternetAddress(emailRecipient.toString()));
                break;
        }
    }
    mimeMessage.setSubject(message.getSubject());
    final String htmlContent = message.getHtml();
    if (StringUtils.isBlank(htmlContent)) {
        // No HTML so we'll just send a plaintext message.
        mimeMessage.setText(message.getText());
    } else {
        // HTML present: send a multipart/alternative body with the plaintext
        // part first (clients display the last part they understand).
        final Multipart multiPart = new MimeMultipart("alternative");
        final BodyPart text = new MimeBodyPart();
        text.setText(message.getText());
        multiPart.addBodyPart(text);
        final BodyPart html = new MimeBodyPart();
        html.setContent(message.getHtml(), "text/html");
        multiPart.addBodyPart(html);
        mimeMessage.setContent(multiPart);
    }
    mimeMessage.saveChanges();
    // Hand the assembled message to the AWS transport for delivery.
    final Transport transport = new AWSJavaMailTransport(session, null);
    transport.connect();
    transport.sendMessage(mimeMessage, null);
    transport.close();
}
Example 81
Project: aws-quickstart-master  File: AwsSqsSimpleExample.java View source code
public static void main(String[] args) throws Exception {
    /*
         * The ProfileCredentialsProvider will return your [default]
         * credential profile by reading from the credentials file located at
         * (~/.aws/credentials).
         */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e);
    }
    AmazonSQS sqs = new AmazonSQSClient(credentials);
    // Fixed misleading name: this variable previously called "usWest2" actually
    // held Regions.US_EAST_1; the client talks to us-east-1.
    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    sqs.setRegion(usEast1);
    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon SQS");
    System.out.println("===========================================\n");
    String queue_name = "com-rodosaenz-examples-aws-sqs";
    try {
        // Create a queue
        System.out.println("Creating a new SQS queue called " + queue_name + ".\n");
        CreateQueueRequest createQueueRequest = new CreateQueueRequest(queue_name);
        String myQueueUrl = sqs.createQueue(createQueueRequest).getQueueUrl();
        // List queues
        System.out.println("Listing all queues in your account.\n");
        for (String queueUrl : sqs.listQueues().getQueueUrls()) {
            System.out.println("  QueueUrl: " + queueUrl);
        }
        System.out.println();
        // Send a message
        System.out.println("Sending a message to " + queue_name + ".\n");
        sqs.sendMessage(new SendMessageRequest(myQueueUrl, "This is my message text."));
        // Receive messages (up to 1)
        System.out.println("Receiving messages from MyQueue.\n");
        ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest(myQueueUrl);
        receiveMessageRequest.setMaxNumberOfMessages(1);
        List<Message> messages = sqs.receiveMessage(receiveMessageRequest).getMessages();
        for (Message message : messages) {
            System.out.println("  Message");
            System.out.println("    MessageId:     " + message.getMessageId());
            System.out.println("    ReceiptHandle: " + message.getReceiptHandle());
            System.out.println("    MD5OfBody:     " + message.getMD5OfBody());
            System.out.println("    Body:          " + message.getBody());
            for (Entry<String, String> entry : message.getAttributes().entrySet()) {
                System.out.println("  Attribute");
                System.out.println("    Name:  " + entry.getKey());
                System.out.println("    Value: " + entry.getValue());
            }
        }
        System.out.println();
        // Delete the first received message, if any. A short-poll receive can
        // legitimately return an empty list; previously messages.get(0) would
        // then throw IndexOutOfBoundsException.
        if (!messages.isEmpty()) {
            System.out.println("Deleting a message.\n");
            String messageReceiptHandle = messages.get(0).getReceiptHandle();
            sqs.deleteMessage(new DeleteMessageRequest(myQueueUrl, messageReceiptHandle));
        }
        // Get and print all queue attributes
        GetQueueAttributesRequest request = new GetQueueAttributesRequest(myQueueUrl).withAttributeNames("All");
        final Map<String, String> attributes = sqs.getQueueAttributes(request).getAttributes();
        System.out.println("  Policy: " + attributes.get("Policy"));
        System.out.println("  MessageRetentionPeriod: " + attributes.get("MessageRetentionPeriod"));
        System.out.println("  MaximumMessageSize: " + attributes.get("MaximumMessageSize"));
        System.out.println("  CreatedTimestamp: " + attributes.get("CreatedTimestamp"));
        System.out.println("  VisibilityTimeout: " + attributes.get("VisibilityTimeout"));
        System.out.println("  QueueArn: " + attributes.get("QueueArn"));
        System.out.println("  ApproximateNumberOfMessages: " + attributes.get("ApproximateNumberOfMessages"));
        System.out.println("  ApproximateNumberOfMessagesNotVisible: " + attributes.get("ApproximateNumberOfMessagesNotVisible"));
        System.out.println("  DelaySeconds: " + attributes.get("DelaySeconds"));
        // Delete the queue created above to clean up
        System.out.println("Deleting the test queue.\n");
        sqs.deleteQueue(new DeleteQueueRequest(myQueueUrl));
    } catch (AmazonServiceException ase) {
        // Service-side rejection: dump the full error context.
        System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon SQS, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // Client-side failure (e.g. no network): nothing reached the service.
        System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with SQS, such as not " + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
Example 82
Project: cloudstack-master  File: S3Utils.java View source code
// Returns a cached TransferManager for the given client options, building and
// caching a new S3 client on first use.
// NOTE(review): the cache is keyed by access key ONLY — two ClientOptions that
// share an access key but differ in endpoint/timeouts will reuse the first
// client's configuration; confirm that is intended.
// NOTE(review): the containsKey/put sequence is not atomic; if this map is
// shared across threads, concurrent first calls may each build a client
// (consider computeIfAbsent).
public static TransferManager getTransferManager(final ClientOptions clientOptions) {
    if (TRANSFERMANAGER_ACCESSKEY_MAP.containsKey(clientOptions.getAccessKey())) {
        return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
    }
    final AWSCredentials basicAWSCredentials = new BasicAWSCredentials(clientOptions.getAccessKey(), clientOptions.getSecretKey());
    // Copy each optional client setting onto the SDK configuration; unset
    // options keep the SDK defaults.
    final ClientConfiguration configuration = new ClientConfiguration();
    if (clientOptions.isHttps() != null) {
        configuration.setProtocol(clientOptions.isHttps() ? HTTPS : HTTP);
    }
    if (clientOptions.getConnectionTimeout() != null) {
        configuration.setConnectionTimeout(clientOptions.getConnectionTimeout());
    }
    if (clientOptions.getMaxErrorRetry() != null) {
        configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
    }
    if (clientOptions.getSocketTimeout() != null) {
        configuration.setSocketTimeout(clientOptions.getSocketTimeout());
    }
    if (clientOptions.getUseTCPKeepAlive() != null) {
        configuration.setUseTcpKeepAlive(clientOptions.getUseTCPKeepAlive());
    }
    if (clientOptions.getConnectionTtl() != null) {
        configuration.setConnectionTTL(clientOptions.getConnectionTtl());
    }
    if (clientOptions.getSigner() != null) {
        configuration.setSignerOverride(clientOptions.getSigner());
    }
    LOGGER.debug(format("Creating S3 client with configuration: [protocol: %1$s, signer: %2$s, connectionTimeOut: %3$s, maxErrorRetry: %4$s, socketTimeout: %5$s, useTCPKeepAlive: %6$s, connectionTtl: %7$s]", configuration.getProtocol(), configuration.getSignerOverride(), configuration.getConnectionTimeout(), configuration.getMaxErrorRetry(), configuration.getSocketTimeout(), clientOptions.getUseTCPKeepAlive(), clientOptions.getConnectionTtl()));
    final AmazonS3Client client = new AmazonS3Client(basicAWSCredentials, configuration);
    // Custom endpoint (e.g. an S3-compatible store) overrides the default AWS one.
    if (isNotBlank(clientOptions.getEndPoint())) {
        LOGGER.debug(format("Setting the end point for S3 client with access key %1$s to %2$s.", clientOptions.getAccessKey(), clientOptions.getEndPoint()));
        client.setEndpoint(clientOptions.getEndPoint());
    }
    TRANSFERMANAGER_ACCESSKEY_MAP.put(clientOptions.getAccessKey(), new TransferManager(client));
    return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
}
Example 83
Project: cmb-master  File: AdminServletBase.java View source code
/**
 * Issues a SigV4-signed HTTP POST built from {@code urlString}'s query
 * parameters (sent as a form-encoded body) and returns the response body.
 * On error, logs and returns whatever portion of the response was read.
 */
protected String httpPOST(String baseUrl, String urlString, AWSCredentials awsCredentials) {
    // Accumulate the response here (previously "doc += line" — O(n^2) string concat).
    StringBuilder doc = new StringBuilder();
    try {
        // The request URL is everything before '?': parameters go in the POST body.
        String urlPost = urlString.substring(0, urlString.indexOf("?"));
        URL url = new URL(urlPost);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        // Build a synthetic SDK request purely so the AWS4 signer can produce headers.
        CreateQueueRequest createQueueRequest = new CreateQueueRequest("test");
        Request<CreateQueueRequest> request = new CreateQueueRequestMarshaller().marshall(createQueueRequest);
        // Parse the query-string parameters into a map (used for signing and the body).
        String parameterString = urlString.substring(urlString.indexOf("?") + 1);
        String[] parameterArray = parameterString.split("&");
        Map<String, String> requestParameters = new HashMap<String, String>();
        for (String parameter : parameterArray) {
            requestParameters.put(parameter.substring(0, parameter.indexOf("=")), parameter.substring(parameter.indexOf("=") + 1));
        }
        request.setParameters(requestParameters);
        // Endpoint and resource path are derived from the base URL.
        URI uri = new URI(baseUrl);
        request.setEndpoint(uri);
        String resourcePath = urlString.substring(baseUrl.length(), urlString.indexOf("?"));
        request.setResourcePath(resourcePath);
        AWS4Signer aws4Signer = new AWS4Signer();
        String host = uri.getHost();
        aws4Signer.setServiceName(host);
        aws4Signer.sign(request, awsCredentials);
        // Copy the signed headers onto the real connection.
        for (Entry<String, String> entry : request.getHeaders().entrySet()) {
            conn.setRequestProperty(entry.getKey(), entry.getValue());
        }
        conn.setDoOutput(true);
        // Form-encode the parameters as the POST body.
        StringBuilder bodyBuilder = new StringBuilder();
        for (Entry<String, String> entry : requestParameters.entrySet()) {
            if (bodyBuilder.length() > 0) {
                bodyBuilder.append("&");
            }
            bodyBuilder.append(entry.getKey()).append("=").append(entry.getValue());
        }
        // try-with-resources so the streams are closed even on failure
        // (previously they leaked when an exception was thrown mid-request).
        try (DataOutputStream wr = new DataOutputStream(conn.getOutputStream())) {
            wr.writeBytes(bodyBuilder.toString());
            wr.flush();
        }
        try (BufferedReader br = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
            String line;
            while ((line = br.readLine()) != null) {
                doc.append(line);
            }
        }
        // Fixed: log event previously said "http_get" in this POST method.
        logger.info("event=http_post url=" + urlString);
    } catch (Exception ex) {
        logger.error("event=http_post url=" + urlString, ex);
    }
    return doc.toString();
}
Example 84
Project: code-master  File: ProductCategoryPriceIndex.java View source code
/**
 * The only information needed to create a client are security credentials
 * consisting of the AWS Access Key ID and Secret Access Key. All other
 * configuration, such as the service endpoints, is performed automatically.
 * Client parameters, such as proxies, can be specified in an optional
 * ClientConfiguration object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.ProfilesConfigFile
 * @see com.amazonaws.ClientConfiguration
 */
private static void init() throws Exception {
    // Resolve the [default] credential profile from the shared credentials
    // file at /Users/usdgadiraj/.aws/credentials.
    final AWSCredentials credentials;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (/Users/usdgadiraj/.aws/credentials), and is in valid format.", e);
    }
    // Create the DynamoDB client and pin it to us-west-2.
    dynamoDB = new AmazonDynamoDBClient(credentials);
    dynamoDB.setRegion(Region.getRegion(Regions.US_WEST_2));
}
Example 85
Project: dataservices-sdk-java-master  File: ViPRS3Signer.java View source code
/**
 * Signs the request using S3's SigV2 scheme: an HMAC-SHA1 signature over the
 * canonical request string, attached via Date and Authorization headers.
 * Requests without a secret key are left unsigned. Session credentials, when
 * present, are also attached to the request.
 */
@Override
public void sign(Request<?> request, AWSCredentials credentials) throws AmazonClientException {
    // No secret key -> nothing to sign with; send the request anonymously.
    if (credentials == null || credentials.getAWSSecretKey() == null) {
        log.debug("Canonical string will not be signed, as no AWS Secret Key was provided");
        return;
    }
    // Normalize the credentials via the SDK helper before using them.
    AWSCredentials sanitizedCredentials = sanitizeCredentials(credentials);
    if (sanitizedCredentials instanceof AWSSessionCredentials) {
        // Temporary (session) credentials additionally carry a session token.
        addSessionCredentials(request, (AWSSessionCredentials) sanitizedCredentials);
    }
    /*
     * In s3 sigv2, the way slash characters are encoded should be
     * consistent in both the request url and the encoded resource path.
     * Since we have to encode "//" to "/%2F" in the request url to make
     * httpclient works, we need to do the same encoding here for the
     * resource path.
     */
    String encodedResourcePath = HttpUtils.appendUri(request.getEndpoint().getPath(), resourcePath, true);
    // Date header participates in the signature; honor any clock offset.
    Date date = getSignatureDate(request.getTimeOffset());
    request.addHeader(Headers.DATE, ServiceUtils.formatRfc822Date(date));
    String canonicalString = makeS3CanonicalString(httpVerb, encodedResourcePath, request, null);
    log.debug("Calculated string to sign:\n\"" + canonicalString + "\"");
    String signature = super.signAndBase64Encode(canonicalString, sanitizedCredentials.getAWSSecretKey(), SigningAlgorithm.HmacSHA1);
    request.addHeader("Authorization", "AWS " + sanitizedCredentials.getAWSAccessKeyId() + ":" + signature);
}
Example 86
Project: es-amazon-s3-river-master  File: S3Connector.java View source code
/**
 * Connect to the specified bucket using previously given accesskey and secretkey.
 * @param bucketName Name of the bucket to connect to
 * @param pathPrefix Prefix that will be later used for filtering documents
 * @throws AmazonS3Exception when access or secret keys are wrong or bucket does not exists
 */
public void connectUserBucket(String bucketName, String pathPrefix) throws AmazonS3Exception {
    this.bucketName = bucketName;
    this.pathPrefix = pathPrefix;
    if (accessKey != null && secretKey != null) {
        // Explicit keys configured on this connector take precedence.
        s3Client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
    } else if (useIAMRoleForEC2) {
        // Force usage of IAM Role process as described into
        // http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-roles.html.
        s3Client = new AmazonS3Client(new InstanceProfileCredentialsProvider());
    } else {
        // Default credentials retrieval or IAM Role process as described into
        // http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-roles.html.
        s3Client = new AmazonS3Client();
    }
    // Getting location seems odd as we don't use it later, but doesBucketExists()
    // returns true even for non-existing buckets; this call fails fast instead.
    s3Client.getBucketLocation(bucketName);
}
Example 87
Project: jackrabbit-master  File: Utils.java View source code
/**
 * Create AmazonS3Client from properties.
 *
 * @param prop properties to configure {@link AmazonS3Client}
 * @return {@link AmazonS3Client}
 */
public static AmazonS3Client openService(final Properties prop) {
    final String accessKey = prop.getProperty(S3Constants.ACCESS_KEY);
    final String secretKey = prop.getProperty(S3Constants.SECRET_KEY);
    final AmazonS3Client s3service;
    if (StringUtils.isNullOrEmpty(accessKey) || StringUtils.isNullOrEmpty(secretKey)) {
        // No keys in the properties: defer to the environment/default chain.
        LOG.info("Configuring Amazon Client from environment");
        s3service = new AmazonS3Client(getClientConfiguration(prop));
    } else {
        LOG.info("Configuring Amazon Client from property file.");
        s3service = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey), getClientConfiguration(prop));
    }
    String region = prop.getProperty(S3Constants.S3_REGION);
    final String propEndPoint = prop.getProperty(S3Constants.S3_END_POINT);
    final String endpoint;
    if (propEndPoint != null && !"".equals(propEndPoint)) {
        // Explicit endpoint property overrides any region-derived value.
        endpoint = propEndPoint;
    } else {
        if (StringUtils.isNullOrEmpty(region)) {
            // Fall back to the region the current host runs in, if any.
            com.amazonaws.regions.Region s3Region = Regions.getCurrentRegion();
            if (s3Region == null) {
                throw new AmazonClientException("parameter [" + S3Constants.S3_REGION + "] not configured and cannot be derived from environment");
            }
            region = s3Region.getName();
        }
        if (DEFAULT_AWS_BUCKET_REGION.equals(region)) {
            endpoint = S3 + DOT + AWSDOTCOM;
        } else if (Region.EU_Ireland.toString().equals(region)) {
            // eu-west-1 historically uses a dedicated hostname.
            endpoint = "s3-eu-west-1" + DOT + AWSDOTCOM;
        } else {
            endpoint = S3 + DASH + region + DOT + AWSDOTCOM;
        }
    }
    /*
     * Setting endpoint up front removes the latency of redirection: without
     * it the first invocation goes to the US standard region, which then
     * redirects to the correct location.
     */
    s3service.setEndpoint(endpoint);
    LOG.info("S3 service endpoint [{}] ", endpoint);
    return s3service;
}
Example 88
Project: jmx-cloudwatch-reporter-master  File: ReporterAgent.java View source code
public void init(String agentArguments) throws IOException {
    // The agent argument, when present, names the config file; otherwise use the default.
    final String configFile = (agentArguments != null) ? agentArguments : "jmx-cloudwatch.json";
    readConfig(configFile);
    initLogger();
    initAllowedBeans();
    // Register JMX listeners before anything else so beans registered after
    // premain are also picked up.
    addBeanNotificationListener();
    registerPlatformBeans();
    // CloudWatch credentials come straight from the parsed settings.
    initReporter(new BasicAWSCredentials(settings.awsAccessKey, settings.awsSecretKey));
}
Example 89
Project: march4-master  File: AWSJavaMailSample.java View source code
public static void main(String[] args) throws IOException {
    /*
     * Credentials are loaded from an AwsCredentials.properties file at the
     * root of the classpath. Be sure to fill in your AWS access credentials
     * there before running this sample:
     * http://aws.amazon.com/security-credentials
     */
    final AWSCredentials credentials = new ClasspathPropertiesFileCredentialsProvider().getCredentials();
    final AmazonSimpleEmailService ses = new AmazonSimpleEmailServiceClient(credentials);
    ses.setRegion(Region.getRegion(Regions.US_WEST_2));
    /*
     * Amazon SES requires the FROM address to be verified before it may send
     * mail; this triggers a verification email containing a confirmation link.
     */
    verifyEmailAddress(ses, FROM);
    /*
     * Accounts still in the Amazon SES sandbox must verify recipient
     * addresses too — uncomment the line below to verify the TO address.
     * Once granted full access (http://aws.amazon.com/ses/fullaccessrequest)
     * recipient verification is no longer required.
     */
    //verifyEmailAddress(ses, TO);
    // Route JavaMail through Amazon SES by selecting the "aws" transport protocol.
    final Properties mailProps = new Properties();
    mailProps.setProperty("mail.transport.protocol", "aws");
    /*
     * mail.aws.user and mail.aws.password are optional; setting them enables
     * the static Transport.send() convenience method and a no-arg connect().
     * Otherwise a user name and password must be passed to connect().
     */
    mailProps.setProperty("mail.aws.user", credentials.getAWSAccessKeyId());
    mailProps.setProperty("mail.aws.password", credentials.getAWSSecretKey());
    final Session session = Session.getInstance(mailProps);
    try {
        // Compose the message.
        final Message message = new MimeMessage(session);
        message.setFrom(new InternetAddress(FROM));
        message.addRecipient(Message.RecipientType.TO, new InternetAddress(TO));
        message.setSubject(SUBJECT);
        message.setText(BODY);
        message.saveChanges();
        // One Transport instance can (and should) be reused across sends for
        // better performance; close it when completely done.
        final Transport transport = new AWSJavaMailTransport(session, null);
        transport.connect();
        transport.sendMessage(message, null);
        transport.close();
    } catch (AddressException e) {
        e.printStackTrace();
        System.out.println("Caught an AddressException, which means one or more of your " + "addresses are improperly formatted.");
    } catch (MessagingException e) {
        e.printStackTrace();
        System.out.println("Caught a MessagingException, which means that there was a " + "problem sending your message to Amazon's E-mail Service check the " + "stack trace for more information.");
    }
}
Example 90
Project: metamodel-master  File: DynamoDbDataContextIntegrationTest.java View source code
@Before
public void before() throws Exception {
    // Integration-test configuration lives in the developer's home directory;
    // skip (not fail) the test when it is absent or incomplete.
    final String userHome = System.getProperty("user.home");
    final String propertiesFilename = userHome + "/metamodel-integrationtest-configuration.properties";
    final File file = new File(propertiesFilename);
    Assume.assumeTrue(file.exists());
    final Properties props = new Properties();
    // try-with-resources: the original leaked the FileReader handle.
    try (FileReader reader = new FileReader(file)) {
        props.load(reader);
    }
    final String accessKey = props.getProperty("dynamodb.accessKey");
    final String secretKey = props.getProperty("dynamodb.secretKey");
    Assume.assumeNotNull(accessKey, secretKey);
    // Region defaults to the SDK's default region when not configured.
    final Regions region = Regions.fromName(props.getProperty("dynamodb.region", Regions.DEFAULT_REGION.getName()));
    final AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    final AWSCredentialsProvider credentialsProvider = new AWSStaticCredentialsProvider(credentials);
    client = AmazonDynamoDBClientBuilder.standard().withRegion(region).withCredentials(credentialsProvider).build();
}
Example 91
Project: s3-wagon-private-master  File: PrivateS3Wagon.java View source code
@Override
protected void connectToRepository(Repository repository, AuthenticationInfo authenticationInfo, ProxyInfoProvider proxyInfoProvider) throws AuthenticationException {
    // Already connected: nothing to do.
    if (this.amazonS3 != null) {
        return;
    }
    final ClientConfiguration clientConfiguration = S3Utils.getClientConfiguration(proxyInfoProvider);
    this.bucketName = S3Utils.getBucketName(repository);
    this.baseDirectory = S3Utils.getBaseDirectory(repository);
    // Explicit repository credentials take precedence; otherwise fall back to
    // the SDK's default provider chain (env vars, profiles, instance roles).
    if (authenticationInfo == null) {
        this.amazonS3 = new AmazonS3Client(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
    } else {
        this.amazonS3 = new AmazonS3Client(new AuthenticationInfoAWSCredentials(authenticationInfo), clientConfiguration);
    }
    try {
        com.amazonaws.regions.Region region = parseRegion(new DefaultAwsRegionProviderChain().getRegion());
        if (region.getPartition().equals("aws")) {
            // Standard partition: let the bucket location decide the endpoint.
            detectEndpointFromBucket();
        } else {
            // Non-standard partitions (e.g. China, GovCloud) must be set explicitly.
            this.amazonS3.setRegion(region);
        }
    } catch (AmazonClientException e) {
        // Region could not be resolved from the environment; probe the bucket.
        detectEndpointFromBucket();
    }
}
Example 92
Project: SE252-JAN2015-master  File: SDBConn.java View source code
public static LayerInfo[] getCategories() {
    if (sdb == null) {
        // Lazily create the SimpleDB client, pinned to the Singapore region.
        sdb = new AmazonSimpleDBClient(new BasicAWSCredentials(accessKey, secretKey));
        sdb.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_1));
    }
    final ArrayList<LayerInfo> layers = new ArrayList<LayerInfo>();
    try {
        String queryString = "select CatName from `" + cat + "`";
        SelectRequest selectRequest = new SelectRequest(queryString);
        // Seed the list with the fixed "Dining Outside" entry.
        LayerInfo layer = new LayerInfo();
        layer.setName("Dining Outside");
        layer.setCategory("");
        layer.setLatitude(0);
        layer.setLongitude(0);
        layer.setDescription("");
        layers.add(layer);
        // One LayerInfo per returned item, named from its CatName attribute.
        for (Item item : sdb.select(selectRequest).getItems()) {
            layer = new LayerInfo();
            layer.setCategory("");
            layer.setLatitude(0);
            layer.setLongitude(0);
            layer.setDescription("");
            for (Attribute attribute : item.getAttributes()) {
                if (attribute.getName().equals("CatName")) {
                    layer.setName(attribute.getValue());
                }
            }
            layers.add(layer);
        }
    } catch (Exception e) {
        // Best-effort: a query failure yields whatever was collected so far.
        System.out.println(e);
    }
    final LayerInfo[] layerArray = new LayerInfo[layers.size()];
    layers.toArray(layerArray);
    return layerArray;
}
Example 93
Project: streamreduce-core-master  File: CamelFacade.java View source code
private static SQSClientAndEndPointPair setupSqsEndpointAndClient(String queueName, String environment) throws UnsupportedEncodingException {
    // AWS keys come from the (override-aware) cloud.properties file.
    Properties cloudProperties = PropertiesOverrideLoader.loadProperties("cloud.properties");
    AWSCredentials awsCredentials = new BasicAWSCredentials(cloudProperties.getProperty("nodeable.aws.accessKeyId"), cloudProperties.getProperty("nodeable.aws.secretKey"));
    AmazonSQSClient sqsClient = new AmazonSQSClient(awsCredentials);
    // Build the Camel aws-sqs endpoint URI; messages are retained for 14 days.
    String actualQueueName = SqsQueueNameFormatter.formatSqsQueueName(queueName, environment);
    long messageRetentionPeriod = TimeUnit.DAYS.toSeconds(14);
    String endpoint = String.format("aws-sqs://" + actualQueueName + "?amazonSQSClient=#amazonSQSClient&" + "messageRetentionPeriod=%d", messageRetentionPeriod);
    return new SQSClientAndEndPointPair(sqsClient, endpoint);
}
Example 94
Project: voltdb-master  File: KinesisStreamImporterConfig.java View source code
/**
 * Connect to a kinesis stream to discover the shards on the stream.
 *
 * @param regionName The region name where the stream resides
 * @param streamName The kinesis stream name
 * @param accessKey The user access key
 * @param secretKey The user secret key
 * @param appName  The name of stream application (embedded in the client user agent)
 * @return the stream's shards, or {@code null} when the region name is
 *         unknown, the stream does not exist or is not ACTIVE, or any other
 *         error occurs (failures are logged, never propagated)
 */
public static List<Shard> discoverShards(String regionName, String streamName, String accessKey, String secretKey, String appName) {
    try {
        Region region = RegionUtils.getRegion(regionName);
        if (region != null) {
            final AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
            AmazonKinesis kinesisClient = new AmazonKinesisClient(credentials, getClientConfigWithUserAgent(appName));
            kinesisClient.setRegion(region);
            DescribeStreamResult result = kinesisClient.describeStream(streamName);
            // Only an ACTIVE stream is usable; the throw below is caught by
            // the generic handler, logged, and turned into a null return.
            if (!"ACTIVE".equals(result.getStreamDescription().getStreamStatus())) {
                throw new IllegalArgumentException("Kinesis stream " + streamName + " is not active.");
            }
            return result.getStreamDescription().getShards();
        }
    } catch (ResourceNotFoundException e) {
        LOGGER.warn("Kinesis stream " + streamName + " does not exist.", e);
    } catch (Exception e) {
        LOGGER.warn("Error found while describing the kinesis stream " + streamName, e);
    }
    // Unknown region or any failure above ends up here.
    return null;
}
Example 95
Project: zipkin-master  File: ZipkinElasticsearchAwsStorageAutoConfiguration.java View source code
@Override
public AWSCredentials get() {
    // Resolve credentials from the underlying SDK provider and repackage
    // them as zipkin's own AWSCredentials value type.
    com.amazonaws.auth.AWSCredentials sdkCredentials = delegate.getCredentials();
    String sessionToken = null;
    if (sdkCredentials instanceof AWSSessionCredentials) {
        sessionToken = ((AWSSessionCredentials) sdkCredentials).getSessionToken();
    }
    return new AWSCredentials(sdkCredentials.getAWSAccessKeyId(), sdkCredentials.getAWSSecretKey(), sessionToken);
}
Example 96
Project: AIR-master  File: AirpalModule.java View source code
@Singleton
@Provides
@Nullable
public AmazonS3 provideAmazonS3Client(@Nullable AWSCredentials awsCredentials, @Nullable EncryptionMaterialsProvider encryptionMaterialsProvider) {
    // An encryption materials provider upgrades the plain client to the
    // client-side-encryption variant in both credential modes.
    final boolean encrypted = encryptionMaterialsProvider != null;
    if (awsCredentials == null) {
        // No explicit credentials: rely on the EC2 instance profile.
        final InstanceProfileCredentialsProvider instanceProfile = new InstanceProfileCredentialsProvider();
        return encrypted ? new AmazonS3EncryptionClient(instanceProfile, encryptionMaterialsProvider) : new AmazonS3Client(instanceProfile);
    }
    return encrypted ? new AmazonS3EncryptionClient(awsCredentials, encryptionMaterialsProvider) : new AmazonS3Client(awsCredentials);
}
Example 97
Project: airpal-master  File: AirpalModule.java View source code
@Singleton
@Provides
@Nullable
public AmazonS3 provideAmazonS3Client(@Nullable AWSCredentials awsCredentials, @Nullable EncryptionMaterialsProvider encryptionMaterialsProvider) {
    if (awsCredentials == null) {
        // No explicit credentials: fall back to the EC2 instance profile,
        // optionally wrapped with client-side encryption.
        final InstanceProfileCredentialsProvider instanceProfile = new InstanceProfileCredentialsProvider();
        if (encryptionMaterialsProvider != null) {
            return new AmazonS3EncryptionClient(instanceProfile, encryptionMaterialsProvider);
        }
        return new AmazonS3Client(instanceProfile);
    }
    // Explicit credentials supplied by the module configuration.
    if (encryptionMaterialsProvider != null) {
        return new AmazonS3EncryptionClient(awsCredentials, encryptionMaterialsProvider);
    }
    return new AmazonS3Client(awsCredentials);
}
Example 98
Project: aws-hal-client-java-master  File: HalClient.java View source code
//-------------------------------------------------------------
// Methods - Private
//-------------------------------------------------------------
/**
 * Marshals and executes a single HAL request, timing each phase
 * (marshalling, credential lookup, client execution) via AWSRequestMetrics.
 *
 * @param httpMethodName HTTP verb for the request
 * @param resourcePath path of the resource to invoke
 * @param representation request body representation (presumably may be null
 *        for body-less verbs — TODO confirm with buildRequest)
 * @param responseHandler handler that unmarshals the service response
 * @return the unmarshalled response payload
 * @throws AmazonClientException on client-side failure
 */
private <T> T invoke(HttpMethodName httpMethodName, String resourcePath, Object representation, HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler) throws AmazonClientException {
    ExecutionContext executionContext = createExecutionContext();
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    // Each start/end pair must bracket exactly one phase; keep ordering intact.
    awsRequestMetrics.startEvent(AWSRequestMetrics.Field.RequestMarshallTime.name());
    Request request = buildRequest(httpMethodName, resourcePath, representation);
    awsRequestMetrics.endEvent(AWSRequestMetrics.Field.RequestMarshallTime.name());
    awsRequestMetrics.startEvent(AWSRequestMetrics.Field.CredentialsRequestTime.name());
    AWSCredentials credentials = awsCredentialsProvider.getCredentials();
    awsRequestMetrics.endEvent(AWSRequestMetrics.Field.CredentialsRequestTime.name());
    executionContext.setCredentials(credentials);
    awsRequestMetrics.startEvent(AWSRequestMetrics.Field.ClientExecuteTime.name());
    Response<T> response = client.execute(request, responseHandler, errorResponseHandler, executionContext);
    awsRequestMetrics.endEvent(AWSRequestMetrics.Field.ClientExecuteTime.name());
    // Flush the collected timings before unwrapping the payload.
    awsRequestMetrics.log();
    return response.getAwsResponse();
}
Example 99
Project: bolton-sigmod2013-code-master  File: DynamoDBClient.java View source code
/**
 * Initialize any state for this DB. Called once per DB instance; there is
 * one DB instance per client thread.
 *
 * @throws DBException if the primary key is not configured or the DynamoDB
 *         client cannot be created
 */
public void init() throws DBException {
    // initialize DynamoDb driver & table.
    String debug = getProperties().getProperty("dynamodb.debug", null);
    if (null != debug && "true".equalsIgnoreCase(debug)) {
        logger.setLevel(Level.DEBUG);
    }
    String endpoint = getProperties().getProperty("dynamodb.endpoint", null);
    String credentialsFile = getProperties().getProperty("dynamodb.awsCredentialsFile", null);
    String primaryKey = getProperties().getProperty("dynamodb.primaryKey", null);
    String consistentReads = getProperties().getProperty("dynamodb.consistentReads", null);
    String connectMax = getProperties().getProperty("dynamodb.connectMax", null);
    if (null != connectMax) {
        this.maxConnects = Integer.parseInt(connectMax);
    }
    if (null != consistentReads && "true".equalsIgnoreCase(consistentReads)) {
        this.consistentRead = true;
    }
    if (null != endpoint) {
        this.endpoint = endpoint;
    }
    if (null == primaryKey || primaryKey.length() < 1) {
        String errMsg = "Missing primary key attribute name, cannot continue";
        logger.error(errMsg);
        // Fail fast: without a primary key every subsequent operation is broken.
        // (The original only logged and continued with a half-initialized client.)
        throw new DBException(errMsg);
    }
    try {
        AWSCredentials credentials = new PropertiesCredentials(new File(credentialsFile));
        ClientConfiguration cconfig = new ClientConfiguration();
        cconfig.setMaxConnections(maxConnects);
        dynamoDB = new AmazonDynamoDBClient(credentials, cconfig);
        dynamoDB.setEndpoint(this.endpoint);
        primaryKeyName = primaryKey;
        logger.info("dynamodb connection created with " + this.endpoint);
    } catch (Exception e1) {
        String errMsg = "DynamoDBClient.init(): Could not initialize DynamoDB client: " + e1.getMessage();
        logger.error(errMsg);
        // Surface the failure to the caller instead of swallowing it and
        // leaving dynamoDB null (which would NPE on first use).
        throw new DBException(errMsg);
    }
}
Example 100
Project: devonaws-labs-java-master  File: Lab41.java View source code
public LabVariables prepMode_Run() throws IOException {
    final LabVariables labVariables = new LabVariables();
    final AWSCredentials credentials = getCredentials("prepmode");
    final AmazonIdentityManagementClient iamClient = new AmazonIdentityManagementClient(credentials);
    //iamClient.setRegion(Lab41.region);
    // Policy documents and the trust-relationship template are read from disk.
    final String trustRelationshipSource = readTextFile("TrustRelationship.txt");
    final String developmentPolicyText = readTextFile("development_role.txt");
    final String productionPolicyText = readTextFile("production_role.txt");
    // Start from a clean slate: remove the roles if a previous run left them behind.
    optionalLabCode.prepMode_RemoveRoles(iamClient, "development_role", "production_role");
    // The trust relationship (as used here) must embed the lab user's ARN.
    final String userArn = labCode.prepMode_GetUserArn(iamClient, LabUserName);
    System.out.println("ARN for " + LabUserName + " is " + userArn);
    final String trustRelationship = trustRelationshipSource.replaceAll("\\{userArn\\}", userArn);
    System.out.println("Trust relationship policy:\n" + trustRelationship);
    // Create both roles and remember their ARNs.
    labVariables.setDevelopmentRoleArn(labCode.prepMode_CreateRole(iamClient, "development_role", developmentPolicyText, trustRelationship));
    labVariables.setProductionRoleArn(labCode.prepMode_CreateRole(iamClient, "production_role", productionPolicyText, trustRelationship));
    System.out.println("Created development policy role: " + labVariables.getDevelopmentRoleArn());
    System.out.println("Created production policy role: " + labVariables.getProductionRoleArn());
    // Derive unique bucket names, one for dev and one for prod.
    final String identifier = UUID.randomUUID().toString().substring(0, 8);
    labVariables.getBucketNames().add("dev" + identifier);
    labVariables.getBucketNames().add("prod" + identifier);
    // Create the buckets themselves.
    final AmazonS3Client s3Client = new AmazonS3Client(credentials);
    s3Client.setRegion(Lab41.region);
    for (String bucketName : labVariables.getBucketNames()) {
        optionalLabCode.prepMode_CreateBucket(s3Client, bucketName, region);
        System.out.println("Created bucket: " + bucketName);
    }
    return labVariables;
}
Example 101
Project: event-collector-master  File: TestS3Combine.java View source code
@BeforeClass
@Parameters({ "aws-credentials-file", "aws-test-bucket" })
public void setUpClass(String awsCredentialsFile, String awsTestBucket) throws Exception {
    // Credentials live in a small JSON file keyed by "access-id" / "private-key".
    final String credentialsJson = Files.toString(new File(awsCredentialsFile), Charsets.UTF_8);
    final Map<String, String> credentialMap = JsonCodec.mapJsonCodec(String.class, String.class).fromJson(credentialsJson);
    final AWSCredentials awsCredentials = new BasicAWSCredentials(credentialMap.get("access-id"), credentialMap.get("private-key"));
    service = new AmazonS3Client(awsCredentials);
    transferManager = new TransferManager(awsCredentials);
    testBucket = awsTestBucket;
    // Create the scratch bucket on first use.
    if (!service.doesBucketExist(testBucket)) {
        service.createBucket(testBucket);
    }
}