Log4j2 configuration example

Here is a simple log4j2 configuration file with a custom logging level for a select few packages:

  1. Create a file named log4j2.xml in the src/main/resources directory
  2. Add the following content: <?xml version="1.0" encoding="UTF-8"?><Configuration status="WARN"> <Appenders> <Console name="Console" target="SYSTEM_OUT"> <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/> </Console> </Appenders> <Loggers> <Logger name="com.foo.Bar" level="trace"> <AppenderRef ref="Console"/> </Logger> <Root level="error"> <AppenderRef ref="Console"/> </Root> </Loggers></Configuration>

How to marshal an instance of protobuf to JSON in Go

Proto

syntax = "proto3";

package myproto;

message MyMessage {
    string name = 1;
    int32 age = 2;
}

package main

import (
    "fmt"
    "github.com/golang/protobuf/jsonpb"
    "github.com/golang/protobuf/proto"
    "myproto" // Import the generated protobuf package
)

func main() {
    // Create an instance of MyMessage
    myMessage := &myproto.MyMessage{
        Name: "Alice",
        Age:  30,
    }

    // Create a JSON marshaler
    marshaller := &jsonpb.Marshaler{}

    // Marshal the protobuf message to JSON
    jsonString, err := marshaller.MarshalToString(myMessage)
    if err != nil {
        fmt.Println("Error marshaling to JSON:", err)
        return
    }

    // Print the JSON string
    fmt.Println(jsonString)
}

How to unmarshal JSON into a struct in Go (the example below uses plain encoding/json, not protobuf)

package main

import (
	"encoding/json"
	"fmt"
)

// SomeStruct models the JSON payload we expect to parse.
type SomeStruct struct {
	ID    int    `json:"id"`
	Title string `json:"title"`
}

func main() {
	payload := `{
        "id": 1,
        "title": "Some title"
    }`

	// Decode the raw JSON bytes into the struct; field mapping is
	// driven by the `json:"..."` tags above.
	var parsed SomeStruct
	err := json.Unmarshal([]byte(payload), &parsed)
	if err != nil {
		fmt.Println("Error unmarshaling content:", err)
		return
	}

	fmt.Println("ID:", parsed.ID)
	fmt.Println("Title:", parsed.Title)
}

Linux tuning : Network performance

Here are a few settings that can greatly improve the network performance of your Linux machine:

# set OS max read & write buffers
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216

#TCP Autotuning settings
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 87380 16777216

# disable slow start after idle
net.ipv4.tcp_slow_start_after_idle = 0

#enable TCP window scaling
net.ipv4.tcp_window_scaling = 1

With these settings in place, we stopped seeing all the Netty reconnect problems in the Storm topologies. Make sure to apply these settings to all Storm Supervisor nodes.

These settings can be applied at run time using the sysctl command as shown below, but they will not persist between system reboots:

$ sudo sysctl -w net.ipv4.tcp_rmem="4096 87380 16777216"

To permanently save them update /etc/sysctl.conf or a file under /etc/sysctl.d, and run this command:
$ sudo sysctl -p /etc/sysctl.conf

 

Get current Epoch timestamp

#java:

System.currentTimeMillis();
Note: the value is in milliseconds. Make sure to divide by 1000 if you need the value in seconds

#php:

time()

#objective-c:

NSDate *now = [NSDate date];
NSTimeInterval nowEpochSeconds = [now timeIntervalSince1970];

# perl:

time

# python:

import time

int(time.time())

#.NET C#

DateTimeOffset.UtcNow.ToUnixTimeSeconds();

# MySQL:

SELECT unix_timestamp(now());

# Unix/Linux shell:

$ date +%s

#

Multiprocessing jobs with worker pool in Python

#multiprocessing jobs with worker pool in python

from multiprocessing import Process, Lock, Pool
import time
import datetime
import sys
import os
from subprocess import Popen, PIPE

def proc(metric):
    """Scan OpenTSDB for `metric`, grep the matching lines, and gzip
    the result into <metric>.dat.gz in the current directory.

    Runs the pipeline: tsdb scan ... | grep <metric> | gzip > file.
    """
    # Parenthesized print works under both Python 2 and Python 3.
    print("Working on %s" % metric)
    cmd1 = "/usr/bin/tsdb scan --import 2012/04/10 sum %s" % metric
    cmd2 = "grep %s" % metric

    # 'wb': gzip emits binary data, so the output file must be opened in
    # binary mode; `with` guarantees the handle is closed even on error.
    with open("%s.dat.gz" % metric, 'wb') as logfile:
        p1 = Popen(cmd1.split(" "), stdout=PIPE)
        p2 = Popen(cmd2.split(" "), stdin=p1.stdout, stdout=PIPE)
        # Close our copy of the upstream pipe so the producer receives
        # SIGPIPE if the consumer exits early (standard pipeline idiom).
        p1.stdout.close()
        p3 = Popen(['gzip'], stdin=p2.stdout, stdout=logfile)
        p2.stdout.close()
        p3.communicate()

if __name__ == '__main__':
    # Read one metric name per line; `with` closes the file promptly
    # (the original leaked the handle until interpreter exit).
    with open('file_list_of_metrics.txt', 'r') as f:
        metrics = [line.rstrip() for line in f]

    # Fan the metrics out across 10 worker processes, one metric per
    # task (chunksize=1) so slow metrics do not block whole chunks.
    pool = Pool(processes=10)
    pool.map(proc, metrics, 1)
    pool.close()
    pool.join()