
Sunday, July 26, 2020

Spring Boot application - integration with Vault

Description

Vault is a very useful tool for storing sensitive data in a secure way. To read the data, a client first has to pass an authentication process. Applications can obtain credentials and certificates for databases, internal and external services, file storages, etc. In addition, Vault can encrypt data that is then stored elsewhere, for example in a database (that case won't be covered in this post).



In this simple case we prepare a small application that fetches sensitive information from Vault. We only write that data to the logger to verify the solution.







The Solution

Vault

Basic Vault server configuration is described at https://spring.io/guides/gs/vault-config/. It contains important information such as the required Java version and the download location of the binaries (https://www.vaultproject.io/downloads). It is recommended to add Vault's location to the system PATH variable.

Let's start the Vault server in dev mode with a fixed root token:
vault server --dev --dev-root-token-id="00000000-0000-0000-0000-000000000000"  




Next, let's add the secrets:

vault kv put secret/artsci-vault-config artsci.username=artsciUser artsci.password=artsciPass




We can see the same result in the web browser (http://localhost:8200/). It is necessary to log in with the token we defined at the beginning
(00000000-0000-0000-0000-000000000000).



Then select 'secret' path:



And finally we can see the previously created secret.

 
As you can see, everything is correct. You can manage this item: create a new version or delete it.


Spring Boot application

I created a new application with the configuration below. The bootstrap.properties file is very important: it is loaded at the very beginning of startup, before the application context is created.



 pom.xml

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.artsci</groupId>
  <artifactId>artsciVoultSpring</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <name>Voult client </name>
  
  
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.1.RELEASE</version>
    </parent>

    <dependencies>

        <!-- Vault Starter -->
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-vault-config</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.12</version>
            <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-simple</artifactId>
            <version>1.8.0-beta4</version>
        </dependency>
        
    </dependencies>

    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>${spring-cloud.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>

    <properties>
        <java.version>1.8</java.version>
        <spring-cloud.version>Greenwich.SR2</spring-cloud.version>
    </properties>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
    
    <pluginRepositories>
        <pluginRepository>
            <id>central</id>
            <name>Central Repository</name>
            <url>https://repo.maven.apache.org/maven2</url>
            <layout>default</layout>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
            <releases>
                <updatePolicy>never</updatePolicy>
            </releases>
        </pluginRepository>
    </pluginRepositories>
    <repositories>
        <repository>
            <id>central</id>
            <name>Central Repository</name>
            <url>https://repo.maven.apache.org/maven2</url>
            <layout>default</layout>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </repository>
    </repositories>
</project>

bootstrap.properties

spring.application.name=artsci-vault-config
spring.cloud.vault.uri=http://localhost:8200
spring.cloud.vault.token=00000000-0000-0000-0000-000000000000
spring.cloud.vault.scheme=http
spring.cloud.vault.kv.enabled=true
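
By default, spring-cloud-starter-vault-config resolves secrets under secret/{spring.application.name}, which is why the path secret/artsci-vault-config used above matches the application name. If you ever need to read a secret programmatically instead of through @ConfigurationProperties, a minimal sketch with Spring Vault's VaultTemplate could look like the class below. This helper is an assumption for illustration only and is not part of the original application.

package artsciVoultSpring;

import java.util.Map;

import org.springframework.stereotype.Component;
import org.springframework.vault.core.VaultTemplate;
import org.springframework.vault.support.VaultResponse;

// Hypothetical helper: reads the same secret directly with the
// VaultTemplate auto-configured by Spring Cloud Vault.
@Component
public class VaultReader {

    private final VaultTemplate vaultTemplate;

    public VaultReader(VaultTemplate vaultTemplate) {
        this.vaultTemplate = vaultTemplate;
    }

    @SuppressWarnings("unchecked")
    public Map<String, Object> readArtsciSecret() {
        // The dev server mounts 'secret/' as a KV v2 engine, so the raw read path
        // contains an extra 'data' segment and the payload sits under the "data" key.
        VaultResponse response = vaultTemplate.read("secret/data/artsci-vault-config");
        return response == null ? null : (Map<String, Object>) response.getData().get("data");
    }
}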


VoltVariables.java

package artsciVoultSpring;

import org.springframework.boot.context.properties.ConfigurationProperties;

import lombok.Data;

@ConfigurationProperties("artsci")
@Data
public class VoltVariables {
    private String username;
    private String password;
}


ArtsciSpringVoultApp.java

package artsciVoultSpring;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.context.properties.EnableConfigurationProperties;

@SpringBootApplication
@EnableConfigurationProperties(VoltVariables.class)
public class ArtsciSpringVoultApp implements CommandLineRunner {

    private final VoltVariables voltVariables;

    public ArtsciSpringVoultApp(VoltVariables voltVariables) {
        this.voltVariables = voltVariables;
    }

    public static void main(String[] args) {
        SpringApplication.run(ArtsciSpringVoultApp.class, args);
    }

    @Override
    public void run(String... args) {
        Logger logger = LoggerFactory.getLogger(ArtsciSpringVoultApp.class);

        logger.info("----------------------------------------");
        logger.info("Configuration properties");
        logger.info("Username: {}", voltVariables.getUsername());
        logger.info("Password: {}", voltVariables.getPassword());
        logger.info("----------------------------------------");
    }
}

The Results

At the end we can compare the properties stored in Vault with the values printed in the application logs.


So everything looks good. Variables are exactly the same :)

Tuesday, July 7, 2020

Kafka, Streams, Java producer and consumer

Kafka, Kafka Streams, Producer and Consumer

The theory

Apache Kafka is a high-throughput integration system that passes messages from a source to a target, so at a high level Apache Kafka is a kind of pipeline. It can work as a single instance or as a cluster of brokers coordinated by ZooKeeper; ZooKeeper is responsible for storing information about the broker nodes and topics. It is also a new approach to integrating systems. Awesome projects are built on top of Apache Kafka, e.g. Confluent (https://docs.confluent.io/current/platform.html), which adds many connectors (JMS, MQTT, Cassandra, Solr, S3, DynamoDB, HDFS, ...) and other elements: Kafka Connect, ksqlDB, REST Proxy. Incoming streams can be filtered or parsed on the fly and the results can be stored in the destination place (a database, file store, Elasticsearch or other systems); for that purpose we can use Kafka Streams or KSQL. Each message is transported as a binary object, and if producers and consumers want to use a complex message structure they should use an external Schema Registry (also available in the mentioned Confluent platform).

Internally Apache Kafka contains topics, and each topic has one or more partitions and replicas. Below I prepared a cluster draft based on 3 brokers. There are three partitions and each leader partition has two additional followers. That configuration of the Kafka cluster guarantees high durability and throughput: if one broker fails, the other brokers handle the events and the partition leadership automatically switches to another broker. The important parameters here are:
--replication-factor 3 --partitions 3   

The Producer

The Producer prepares messages and puts them into Kafka. Depending on the acknowledgement setting it can work in a few configurations:

  • acks=0 >> no acknowledgement is required
  • acks=1 >> only the partition leader's acknowledgement is required
  • acks=all >> acknowledgements from the partition leader and the partition followers are required

Throughput and durability depend on that configuration. If we aggregate application logs we can choose acks=0 (losing some part of the data is not critical), but if we store business data we should consider acks=all (we must keep all data).

Generally, the producer should also have other properties configured, like timeouts, retries, etc. It is also possible to configure the producer as an idempotent producer (enable.idempotence=true); Kafka then checks for duplicates, at the cost of potentially higher latency.

In addition the producer can send a message (see the sketch below):

  1. with a defined key (spread across partitions based on the key hash, so ordering is preserved per key)
  2. without a key (spread across partitions in a round-robin fashion)
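
As a minimal sketch of both variants (assuming the same kafka-clients dependency, broker address and artsci-topic used later in this post), the key is simply an extra constructor argument of ProducerRecord:

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class KeyedVsKeylessProducer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        prop.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        prop.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(prop)) {
            // Keyed record: all messages with key "user-42" land on the same partition,
            // so per-key ordering is preserved.
            producer.send(new ProducerRecord<>("artsci-topic", "user-42", "message with key"));

            // Key-less record: the partitioner spreads these across partitions
            // (round robin or sticky partitioning, depending on the client version).
            producer.send(new ProducerRecord<>("artsci-topic", "message without key"));
        }
    }
}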


The Consumer

Kafka stores data for seven days by default (the retention period). Each consumer group receives data from all partitions of a topic. How the partitions are assigned to connections depends strictly on the number of consumers in a single group.

Delivery semantics for consumers can be specified as follows (a sketch of the at-least-once pattern follows this list):

  • at most once >> the offset is committed as soon as the data is received (possible data loss if the consumer then throws an error)
  • at least once >> the offset is committed only after the consumer has finished processing (possible duplicates, so processing should be made idempotent)
  • exactly once >> this approach is dedicated to stream processing
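
As a minimal sketch of the at-least-once pattern (assuming the same kafka-clients dependency, broker and topic as in the rest of this post), auto commit is disabled and the offset is committed only after processing succeeds:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class AtLeastOnceConsumer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        prop.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "artsci-consumer-group");
        // Disable auto commit so the offset is stored only after processing succeeds.
        prop.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop)) {
            consumer.subscribe(Collections.singletonList("artsci-topic"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000L));
                for (ConsumerRecord<String, String> record : records) {
                    // Process the record here; if this throws, the offset is not committed
                    // and the record is redelivered (hence the possible duplicates).
                    System.out.println(record.key() + " -> " + record.value());
                }
                consumer.commitSync();
            }
        }
    }
}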

The environment

Let's prepare an environment with Apache Kafka. There are a few ways to do it, but I chose the Confluent platform based on Docker. You can find the documentation here: https://docs.confluent.io/current/quickstart/ce-docker-quickstart.html. Follow the steps and I'm sure you will have the entire environment prepared for tests.

That project contains a docker-compose.yml with the defined servers (below). It holds very important configuration, so if you want to change some variables, ports, etc., this is the best place.
---
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:5.5.1
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
  broker:
    image: confluentinc/cp-kafka:5.5.1
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - "29092:29092"
      - "9092:9092"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
...
...
After you start the Confluent project you can see the running servers.

Let's connect to the 'broker' server and create a new 'artsci' topic from the command line:
1. Connect to the 'broker' container




2. Find the main Kafka folder location





3. Create the topic 'artsci-topic' with 3 partitions and replication factor 1 (the maximum replication factor depends on the number of broker nodes, and this environment runs a single broker); a programmatic alternative with the AdminClient is sketched after step 4
kafka-topics --bootstrap-server localhost:9092 --topic artsci-topic --create --partitions 3 --replication-factor 1 

4. Let's find the new topic in the Confluent Control Center web application (http://localhost:9021/)
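
As mentioned in step 3, the same topic can also be created programmatically. Here is a minimal sketch with the Kafka AdminClient (it assumes the same kafka-clients dependency as the applications below; this class is an illustration, not part of the original post):

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateArtsciTopic {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties prop = new Properties();
        prop.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");

        try (AdminClient admin = AdminClient.create(prop)) {
            // 3 partitions, replication factor 1 (single-broker dev environment)
            NewTopic topic = new NewTopic("artsci-topic", 3, (short) 1);
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}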


The Producer application

To produce messages we can use CLI (kafka-console-producer --broker-list 127.0.0.1:9092 --topic artsci-topic --property parse.key=true --property key.separator=,)








To produce messages we can also use a Java application.

pom.xml:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>kafka.artsci</groupId>
  <artifactId>kafkaProducer</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <name>KafkaProducer</name>
  <description>KafkaProducer application in java</description>


    <dependencies>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-simple</artifactId>
            <version>1.8.0-beta4</version>
        </dependency>

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.5.0</version>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.8.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

</project>

Java class:

package kafkaProducer;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ArtsciKafkaProducer {

    private static final String kafkaServer = "127.0.0.1:9092";
    private static final String topicName = "artsci-topic";
    private static final String ALL = "all";
    private static final String TRUE = "true";
    private static final String MAX_CONN = "5";

    static Logger log = LoggerFactory.getLogger(ArtsciKafkaProducer.class.getName());

    public static void main(String[] args) {
        log.info("Start");
        KafkaProducer<String, String> producer = createProducer();
        ProducerRecord<String, String> rec = new ProducerRecord<String, String>(topicName, "key_101", "message 101");
        // Asynchronous send; the callback is invoked once the broker acknowledges the record
        producer.send(rec, new Callback() {
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e != null) {
                    log.error("Error send message!", e);
                } else {
                    log.info("The offset of the record we just sent is: " + metadata.offset());
                }
            }
        });
        producer.flush();
        producer.close();
    }

    private static KafkaProducer<String, String> createProducer() {
        log.info("Prepare Producer");
        Properties prop = new Properties();
        prop.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServer);
        prop.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        prop.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // acks=all together with idempotence gives the strongest delivery guarantee
        prop.setProperty(ProducerConfig.ACKS_CONFIG, ALL);
        prop.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, TRUE);
        prop.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, MAX_CONN);
        return new KafkaProducer<String, String>(prop);
    }
}



The Consumer application

To consume messages we can use CLI (kafka-console-consumer --bootstrap-server 127.0.0.1:9092 --topic artsci-topic --from-beginning --property print.key=true --property key.separator=,)







To consume messages we can also use a Java application.

pom.xml:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">

  <modelVersion>4.0.0</modelVersion>
  <groupId>kafka.artsci</groupId>
  <artifactId>kafkaConsumer</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <name>KafkaConsumer</name>
  <description>KafkaConsumer</description>
  
  <dependencies>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-simple</artifactId>
            <version>1.8.0-beta4</version>
        </dependency>

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.5.0</version>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.8.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
    
</project>

Java class:

package kafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ArtsciKafkaConsumer {

    private static final String kafkaServer = "127.0.0.1:9092";
    private static final String topicName = "artsci-topic";
    private static final String groupId = "artsci-consumer-group";
    private static final String offset = "earliest";

    static Logger log = LoggerFactory.getLogger(ArtsciKafkaConsumer.class.getName());

    public static void main(String[] args) {
        log.info("Start");
        KafkaConsumer<String, String> consumer = createConsumer();
        consumer.subscribe(Arrays.asList(topicName));
        // Single poll for demo purposes; a real consumer would poll in a loop
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000L));
        for (ConsumerRecord<String, String> record : records) {
            log.info("Receive record -> Key: " + record.key() + " value: " + record.value());
        }
        consumer.close();
    }

    private static KafkaConsumer<String, String> createConsumer() {
        log.info("Prepare Consumer");
        Properties prop = new Properties();
        prop.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServer);
        prop.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // Read from the beginning of the topic when there is no committed offset yet
        prop.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
        return new KafkaConsumer<String, String>(prop);
    }
}

The output
[main] INFO kafkaConsumer.ArtsciKafkaConsumer - Receive record -> Key: key_2 value: message 2
[main] INFO kafkaConsumer.ArtsciKafkaConsumer - Receive record -> Key: key_4 value: message 4
[main] INFO kafkaConsumer.ArtsciKafkaConsumer - Receive record -> Key: key_5 value: message 5
[main] INFO kafkaConsumer.ArtsciKafkaConsumer - Receive record -> Key: key_1 value: message 1
[main] INFO kafkaConsumer.ArtsciKafkaConsumer - Receive record -> Key: key_3 value: message 3
[main] INFO kafkaConsumer.ArtsciKafkaConsumer - Receive record -> Key: key_101 value: message 101

Links

https://kafka.apache.org/081/documentation.html
https://docs.confluent.io/current/quickstart/ce-docker-quickstart.html
https://docs.confluent.io/current/platform.html


Other subjects

Security of a Kafka cluster is very important: authentication and authorization both for the internal API between ZooKeeper and the brokers, and for the external producer and consumer APIs.

Monday, June 15, 2020

AWS Lambda - python and java

AWS Lambda

Overview

AWS Lambda is a serverless, automatically scaling component for handling requests from various sources. This computing service is very helpful for preparing data, reacting to changed elements on S3, etc. It can also work as an element handling requests from web services. Below is a very simple solution which contains two AWS Lambda functions. The first one (created in Python) is responsible for the logic. The second one (created in Java) is called to calculate the array's average value.






The solution

AWS Lambda in python

I prepared the Python code with the logic. The function calculates the sum of the array's elements internally. If the average operation is selected, the next AWS Lambda function is called.


import logging
import boto3
import json
logger = logging.getLogger()
logger.setLevel(logging.INFO)
client = boto3.client('lambda')
def lambda_handler(event, context):
    logger.info("FrontFunction got event{}".format(event))
    operation = event['operation']
    listOfItems = event['listOfItems']
 
    arrayStr = []
 
    if (operation == 'avg'):
        for i in listOfItems:
            arrayStr.append(i)
        response = client.invoke(
            FunctionName="Java8Tutorial",
            InvocationType='RequestResponse',
            Payload=json.dumps({"queryParameters": {"listOfItems":arrayStr}})
        )
        response = json.loads(response['Payload'].read().decode("utf-8"))
    elif (operation == 'sum'):
        out = 0
        for i in listOfItems:
            logger.info(i)
            out += int(i)
        response = out 
    else:
        response = 'Not defined'
 
    logger.info(response)
    return response
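
The cross-function call above uses boto3. For completeness, a similar synchronous invoke from Java could look like the sketch below; this is only an illustration under the assumption of the aws-java-sdk-lambda dependency and default credentials, and it is not part of the original solution.

import java.nio.charset.StandardCharsets;

import com.amazonaws.services.lambda.AWSLambda;
import com.amazonaws.services.lambda.AWSLambdaClientBuilder;
import com.amazonaws.services.lambda.model.InvokeRequest;
import com.amazonaws.services.lambda.model.InvokeResult;

public class InvokeJavaLambda {
    public static void main(String[] args) {
        // Uses the default credentials chain and region configuration
        AWSLambda client = AWSLambdaClientBuilder.defaultClient();

        InvokeRequest request = new InvokeRequest()
                .withFunctionName("Java8Tutorial")
                .withInvocationType("RequestResponse")
                .withPayload("{\"queryParameters\":{\"listOfItems\":[\"1\",\"2\",\"3\"]}}");

        // The response payload is returned as a ByteBuffer
        InvokeResult result = client.invoke(request);
        System.out.println(StandardCharsets.UTF_8.decode(result.getPayload()).toString());
    }
}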

The Gateway API contains a reference to this Lambda function.

The CloudWatch monitoring component is integrated with the function's dashboard.

A button to configure function throttling is also available on the top toolbar.

GatewayAPI

Gateway API couples HTTP requests (methods: GET, PUT, POST, DELETE, ...) with Lambda functions. My API is not protected because it is only for testing, but it is good practice to protect the communication. Below we can see the HTTP mapping to the Lambda function.

AWS Lambda in java

The second Lambda is created in Java. Java generally works slower than Python and the "cold start" consumes more time. Below is simple code that parses the JSON input and calculates the average value.


package com.amazonaws.lambda.java8tutorial;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.OptionalDouble;

import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.LambdaLogger;
import com.amazonaws.services.lambda.runtime.RequestStreamHandler;

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;

public class LambdaFunctionHandler implements RequestStreamHandler {

    @Override
    @SuppressWarnings("unchecked")
    public void handleRequest(InputStream input, OutputStream output, Context context) throws IOException {
        LambdaLogger logger = context.getLogger();
        logger.log("Start ARTSCI: Java Lambda handler ");

        BufferedReader reader = new BufferedReader(new InputStreamReader(input));
        JSONObject outJson = new JSONObject();
        JSONArray arrayItems = new JSONArray();
        String outputString = "Array's avg value is ";

        try {
            JSONParser parser = new JSONParser();
            JSONObject event = (JSONObject) parser.parse(reader);
            if (event.get("queryParameters") != null) {
                JSONObject qps = (JSONObject) event.get("queryParameters");
                if (qps.get("listOfItems") != null) {
                    arrayItems = (JSONArray) qps.get("listOfItems");
                    OptionalDouble avg = arrayItems.stream()
                            .mapToInt(str -> Integer.parseInt((String) str))
                            .average();
                    outputString += avg.getAsDouble();
                }
            }

            JSONObject responseBody = new JSONObject();
            responseBody.put("message", outputString);
            outJson.put("body", responseBody);

        } catch (ParseException pex) {
            outJson.put("statusCode", "400");
            outJson.put("exception", pex);
        }

        logger.log(outJson.toJSONString());
        OutputStreamWriter writer = new OutputStreamWriter(output, "UTF-8");
        writer.write(outJson.toJSONString());
        writer.close();
    }
}

It is very important to adjust the permissions so that one Lambda function is allowed to invoke the other one (the calling function's execution role needs the lambda:InvokeFunction permission for the target function).

The Results

At the end it is necessary to test all functionality.

Calculation of AVG
C:\Users\Artur>curl --location --request GET https://[unique].execute-api.eu-central-1.amazonaws.com/dev?name=artsci --header "Content-Type: application/json" --data-raw "{\"operation\": \"avg\",\"listOfItems\": [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]}"
{"body": {"message": "Array's avg value is 3.5"}}

Calculation of SUM
C:\Users\Artur>curl --location --request GET https://[unique].execute-api.eu-central-1.amazonaws.com/dev?name=artsci --header "Content-Type: application/json" --data-raw "{\"operation\": \"sum\",\"listOfItems\": [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]}"
21

Saturday, December 29, 2018

Security - Ciphers


Introduction


Today I'm going to briefly present a topic that covers one part of security: ciphers. At the beginning I'll show the main classification of ciphers, and then each kind will be described in a separate chapter.





As you can see, there are three main types of ciphers:

  1. Simple
  2. Asymmetric
  3. Symmetric

Simple ciphers

Substitution ciphers generally replace each character with another one, or groups of characters are replaced with other characters or groups of characters. Depending on the complexity of the cipher, one character could be replaced by a single character or by a character which belongs to a defined list of elements.

Transposition ciphers are based on changing the position of the characters. An appropriate matrix can be built where the number of columns depends on the secret key; reading the columns in the key's order generates text that is unreadable for others (see the sketch below).
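
A minimal sketch of a columnar transposition, simplified for illustration only (a fixed column count stands in for a real key, and no padding is applied):

public class ColumnarTransposition {

    // Conceptually writes the plaintext row by row into a matrix with the given
    // number of columns and reads it back column by column.
    static String encrypt(String plain, int columns) {
        StringBuilder out = new StringBuilder();
        for (int col = 0; col < columns; col++) {
            for (int i = col; i < plain.length(); i += columns) {
                out.append(plain.charAt(i));
            }
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(encrypt("message for encryption", 4));
    }
}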

Modern ciphers are a mix of substitution and transposition. Let's look at the next categories.



Asymmetric ciphers

Asymmetric cryptography is a kind of cryptography which is based on a private and a public key. One key is used to encrypt a message and the other key is used to decrypt it; it is not possible to perform both encryption and decryption with a single key. Usually it is recommended to use the public key for encryption, so only the owner of the private key can decrypt the message.
For security reasons the encryption algorithm should be used with PKCS#1 or OAEP padding.

The asymmetric algorithms:

  • RSA
  • Diffie–Hellman (one of the key-agreement algorithms)


Below is an example of using the RSA algorithm with OAEP padding. I use the Bouncy Castle library (https://www.bouncycastle.org/).



package security;

import java.security.InvalidKeyException;
import java.security.Key;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import java.security.SecureRandom;
import java.security.Security;
import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;

import org.apache.commons.codec.binary.Base64;

/**
 *
 * @author Artur
 */
public class AsyncCipher {
    private static final String incommingMessage = "message for encryption";
   
    public static void main(String[] args)
            throws InvalidKeyException, NoSuchAlgorithmException,
            IllegalBlockSizeException, BadPaddingException,
            NoSuchProviderException, NoSuchPaddingException {
       
        long startTime = System.currentTimeMillis();
        asyncCipher(incommingMessage);
        long endTime = System.currentTimeMillis();
        System.out.println("Execution time: " + ((endTime - startTime)) + " milliseconds");
    } 
   
    public static void asyncCipher(final String inMessage)
            throws InvalidKeyException, NoSuchAlgorithmException,
            IllegalBlockSizeException, BadPaddingException,
            NoSuchProviderException, NoSuchPaddingException {
       
        Security.addProvider(new org.bouncycastle.jce.provider.BouncyCastleProvider());

        byte[] message = inMessage.getBytes();
        Cipher rsaOaep = Cipher.getInstance("RSA/None/OAEPWithSHA1AndMGF1Padding", "BC");
        SecureRandom sr = new SecureRandom();
        KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA", "BC");

        generator.initialize(2048, sr);

        KeyPair pair = generator.generateKeyPair();
        Key pubKey = pair.getPublic();
        Key privKey = pair.getPrivate();

        rsaOaep.init(Cipher.ENCRYPT_MODE, pubKey, sr);
        long startTime = System.currentTimeMillis();
        byte[] encryptedMessage = rsaOaep.doFinal(message);
        long endTime = System.currentTimeMillis();
        System.out.println("Encryption Execution Time: " + (endTime - startTime)
                + " milliseconds; Encrypted message: ".concat(Base64.encodeBase64String(encryptedMessage)));

        rsaOaep.init(Cipher.DECRYPT_MODE, privKey);
        byte[] decryptedMessage = rsaOaep.doFinal(encryptedMessage);
        System.out.println("Decrypted message : ".concat(new String(decryptedMessage)));
    }
}

Output:

Encryption Execution Time: 1 milliseconds; Encrypted message: Xby9krU99bBxdtr3ea+0tSlP0j7IySiysTmT4PTc/e2L6yIsulnagsrWYNRm59q1Vpt1fOZFJA/7IO+hwngY2ghWihhcGFRfrtPqzWp5Xc6afhg1u4iHO9RhOQc+s2Kfvg+4O7/+zNuCL4HTB4wkEdMS3qU4MPGtaD1o+MygsjPFJJoJZ/ZBn5wbIttjQGpE+Bp6ECQL70D9X8RGyYuuGvaPK9csP7ENMCCmtk0G0uPy7AKpe77A5xEOfUC9K68+af4XMHXeg+0PuhHVlsVSCG972cqlCz1vmKayumnDyzbK/eARrDL3xW54RqVAQlWQgeolcEHzapKcAc4qWuLf9Q==
Decrypted message : message for encryption
Execution time: 1584 milliseconds


I mentioned key-agreement algorithms above; this is a base functionality used in the SSL/TLS protocols. A minimal Diffie-Hellman sketch follows below.
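
Here is a minimal sketch of a Diffie-Hellman key agreement with the standard JCA API; it is illustrative only (real protocols such as TLS add authentication on top of this exchange):

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.util.Arrays;

import javax.crypto.KeyAgreement;
import javax.crypto.interfaces.DHPublicKey;

public class DiffieHellmanExample {
    public static void main(String[] args) throws Exception {
        // Alice generates DH parameters and her key pair.
        KeyPairGenerator aliceGen = KeyPairGenerator.getInstance("DH");
        aliceGen.initialize(2048);
        KeyPair alice = aliceGen.generateKeyPair();

        // Bob reuses Alice's DH parameters (taken from her public key) for his key pair.
        KeyPairGenerator bobGen = KeyPairGenerator.getInstance("DH");
        bobGen.initialize(((DHPublicKey) alice.getPublic()).getParams());
        KeyPair bob = bobGen.generateKeyPair();

        // Each side combines its private key with the other side's public key.
        KeyAgreement aliceAgreement = KeyAgreement.getInstance("DH");
        aliceAgreement.init(alice.getPrivate());
        aliceAgreement.doPhase(bob.getPublic(), true);

        KeyAgreement bobAgreement = KeyAgreement.getInstance("DH");
        bobAgreement.init(bob.getPrivate());
        bobAgreement.doPhase(alice.getPublic(), true);

        // Both sides derive the same shared secret without ever transmitting it.
        System.out.println("Shared secrets equal: "
                + Arrays.equals(aliceAgreement.generateSecret(), bobAgreement.generateSecret()));
    }
}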

Symmetric ciphers

Symmetric ciphers use the same key for encryption and decryption. There are two types of symmetric ciphers: stream ciphers and block ciphers. Below is a diagram with the structure of symmetric ciphers.




Block ciphers split the plain text into blocks of the same size and then encrypt them one by one. In CBC mode the AES algorithm chains the blocks, so each encrypted block depends on the previous one. Stream ciphers work on a stream of plaintext data and do not divide it into smaller blocks.


Below is an example of using the AES algorithm in CBC mode. For authenticated encryption I recommend using GCM mode (a short GCM sketch is added at the end of this post).


package security;

import java.io.UnsupportedEncodingException;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;

import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

import org.apache.commons.codec.binary.Base64;


/**
 *
 * @author Artur
 */
public class SyncCipher {
     private static final String incommingMessage = "message for encryption";
    
    public static void main(String[] args) 
            throws InvalidKeyException, NoSuchAlgorithmException, 
            IllegalBlockSizeException, BadPaddingException, 
            NoSuchProviderException, NoSuchPaddingException, InvalidAlgorithmParameterException, UnsupportedEncodingException {
        
        long startTime = System.currentTimeMillis();
        syncCipher(incommingMessage);
        long endTime = System.currentTimeMillis();
        System.out.println("Total Execution Time: " + ((endTime - startTime)) + " milliseconds");
    }  
    
    public static void syncCipher(final String inMessage) 
            throws InvalidKeyException, NoSuchAlgorithmException, 
            IllegalBlockSizeException, BadPaddingException, 
            NoSuchProviderException, NoSuchPaddingException, InvalidAlgorithmParameterException, UnsupportedEncodingException {
        
        java.security.Security.addProvider(new org.bouncycastle.jce.provider.BouncyCastleProvider());

        IvParameterSpec iv = new IvParameterSpec("myEncIntVector_1".getBytes("UTF-8"));
        SecretKeySpec skeySpec = new SecretKeySpec("myEncryptionKey1".getBytes("UTF-8"), "AES");

        Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5PADDING");
        cipher.init(Cipher.ENCRYPT_MODE, skeySpec, iv);
        long startTime = System.currentTimeMillis();
        byte[] encryptedMessage = cipher.doFinal(inMessage.getBytes());  
        long endTime = System.currentTimeMillis();
        System.out.println("Encryption Execution Time: " + ((endTime - startTime)) + " milliseconds; Encrypted message: ".concat(Base64.encodeBase64String(encryptedMessage)));

        cipher.init(Cipher.DECRYPT_MODE, skeySpec, iv);
        byte[] decryptedMessage = cipher.doFinal(encryptedMessage);
        System.out.println("Decrypted message : ".concat(new String(decryptedMessage)));  
    }
}


Encryption Execution Time: 0 milliseconds; Encrypted message: 5YWJup+o7SRd3o1rtqS1OtTCx7RpY8og8WDBZVXRNZM=
Decrypted message : message for encryption
Total Execution Time: 339 milliseconds
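
As mentioned above, GCM is the recommended mode for authenticated encryption. A minimal sketch with the standard JCA API follows; the hard-coded key is reused from the CBC example for illustration only, and in practice you should use a randomly generated key and a unique nonce per message.

import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import java.util.Base64;

import javax.crypto.Cipher;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class GcmCipherExample {
    public static void main(String[] args) throws Exception {
        SecretKeySpec key = new SecretKeySpec("myEncryptionKey1".getBytes(StandardCharsets.UTF_8), "AES");

        // 12-byte nonce; it must be unique for every message encrypted with the same key.
        byte[] nonce = new byte[12];
        new SecureRandom().nextBytes(nonce);
        GCMParameterSpec spec = new GCMParameterSpec(128, nonce);

        Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
        cipher.init(Cipher.ENCRYPT_MODE, key, spec);
        byte[] encrypted = cipher.doFinal("message for encryption".getBytes(StandardCharsets.UTF_8));
        System.out.println("Encrypted message: " + Base64.getEncoder().encodeToString(encrypted));

        // Decryption requires the same key and nonce; the authentication tag is verified automatically.
        cipher.init(Cipher.DECRYPT_MODE, key, spec);
        System.out.println("Decrypted message: " + new String(cipher.doFinal(encrypted), StandardCharsets.UTF_8));
    }
}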