Skip to main content

Posts

Showing posts with the label Python Programming

Some GUI examples in Python using customtkinter

 Some GUI examples in Python using customtkinter import customtkinter import os from PIL import Image class ScrollableCheckBoxFrame(customtkinter.CTkScrollableFrame):     def __init__(self, master, item_list, command=None, **kwargs):         super().__init__(master, **kwargs)         self.command = command         self.checkbox_list = []         for i, item in enumerate(item_list):             self.add_item(item)     def add_item(self, item):         checkbox = customtkinter.CTkCheckBox(self, text=item)         if self.command is not None:             checkbox.configure(command=self.command)         checkbox.grid(row=len(self.checkbox_list), column=0, pady=(0, 10))         self.checkbox_list.append(checkbox)     def remove_item(self, it...

File Compare Tool in Python and tkinter

 Below is the desktop application developed in Python to compare 2 files and find out the differences. Here is the code: import tkinter as tk from tkinter import filedialog from tkinter import ttk import time import threading class TextComparerApp:     def __init__(self, root):         self.root = root         self.root.title("Text Compare: by Vinayak")         self.root.geometry("800x730")  # Set default window size to 800x730 pixels         # Frame to hold upload buttons         self.upload_frame = tk.Frame(root)         self.upload_frame.pack(pady=10)         # First file upload button         self.upload_button1 = tk.Button(self.upload_frame, activebackground="#99e0aa", text="<--- Insert File 1 --->",                             ...

Add worklog in Jira using Python

 Below is the Python code to add the worklog in Jira. You need to install a request library for this. Here is the code: import requests from requests.auth import HTTPBasicAuth import json url = "https://your jira address here/rest/api/2/issue/ticket_number/worklog" auth = HTTPBasicAuth("username", "jira access token") headers = {     "Accept": "application/json",     "Content-Type": "application/json" } payload = json.dumps({     "comment": {         "content": [             {                 "content": [                     {                         "text": "This is for QA Testing",                         "type": "text"                     } ...

Create a group in Jira using the API in Python

# Below is the Python code to create a group in Jira. You need to install
# the requests library for this (pip install requests). Here is the code:
import requests
from requests.auth import HTTPBasicAuth
import json

url = "https://your jira address here/rest/api/3/group"

# BUG FIX: the original snippet passed auth=auth below without ever
# defining `auth`, which raised NameError at runtime. Define the
# basic-auth credentials (username + API token) here.
auth = HTTPBasicAuth("username", "jira access token")

headers = {
    "Accept": "application/json",
    "Content-Type": "application/json",
}

# Name of the group to create.
payload = json.dumps(
    {
        "name": "QA"
    }
)

response = requests.request(
    "POST",
    url,
    data=payload,
    headers=headers,
    auth=auth,
)

# Pretty-print the JSON body returned by Jira.
print(json.dumps(json.loads(response.text), sort_keys=True, indent=4, separators=(",", ": ")))

Python code to get the groups in Jira

 Below is the Python code to get the groups in Jira. You need to install a request library for this. Here is the code: import requests url = "https://your jira address here/rest/api/2/user/groups?Name=jira name " \       "&accountId=account-id&username=username " payload={} headers = {    'User-Agent': 'Apidog/1.0.0 (https://apidog.com)' } response = requests.request("GET", url, headers=headers, data=payload) print(response.text) ============================================================= import requests from requests.auth import HTTPBasicAuth import json url = "https://your jira address here/rest/api/3/group" auth = HTTPBasicAuth("username", "jira access token") headers = {     "Accept": "application/json" } query = {     'groupId': 'groupId' } response = requests.request(     "GET",     url,     headers=headers,     params=query,     auth=auth ) print(js...

Get all the users from a group in Jira using Python

# Below is the Python code to get all the users from a group in Jira.
# You need to install the requests library for this (pip install requests).
import requests
from requests.auth import HTTPBasicAuth
import json

# Group-member endpoint of the Jira Cloud REST API (v3).
url = "https://your jira address here/rest/api/3/group/member"

# Basic auth with username + API token.
auth = HTTPBasicAuth("username", "jira token mention here")

headers = {"Accept": "application/json"}

# The group is selected by its id, passed as a query parameter.
query = {"groupId": "6078c76f-bf47-4573-a150-b9f0285ac8aa"}

# requests.get is the shorthand for requests.request("GET", ...).
response = requests.get(url, headers=headers, params=query, auth=auth)

# Pretty-print the JSON body returned by Jira.
print(json.dumps(json.loads(response.text), sort_keys=True, indent=4, separators=(",", ": ")))

Retrieve worklog in Jira using Python

# Below is the Python code for worklogs in Jira. You need to install the
# requests library for this (pip install requests).
# NOTE(review): despite the post title ("Create worklog"), this snippet
# issues a GET request, i.e. it RETRIEVES worklog data rather than
# creating any.
import requests
from requests.auth import HTTPBasicAuth
import json

url = "https://your jira address here/worklog"

# Basic auth with username + API token.
auth = HTTPBasicAuth("username", "jira token mention here")

headers = {"Accept": "application/json"}

# requests.get is the shorthand for requests.request("GET", ...).
response = requests.get(url, headers=headers, auth=auth)

# Pretty-print the JSON body returned by Jira.
print(json.dumps(json.loads(response.text), sort_keys=True, indent=4, separators=(",", ": ")))

Capture Network Traffic in Python

# Below is the code to check web traffic (HTTP reachability) in Python.
import requests


def check_web_traffic(url, timeout=10):
    """Fetch *url* and print whether it responded with HTTP 200.

    Args:
        url: The website address to check.
        timeout: Seconds to wait for the server before giving up.
            Added as a backward-compatible default: the original call
            had no timeout, so an unresponsive host would hang forever.
            A timeout raises requests.Timeout, which the existing
            RequestException handler already covers.
    """
    try:
        response = requests.get(url, timeout=timeout)
        if response.status_code == 200:
            print(f"Web traffic for { url } is good.")
        else:
            print(f"Web traffic for { url } is not as expected. Status code: { response.status_code } ")
    except requests.RequestException as e:
        # Covers connection errors, timeouts, and invalid URLs alike.
        print(f"An error occurred: { e } ")


if __name__ == "__main__":
    url = 'Put website url here'
    check_web_traffic(url)

Create Progress bar in Python

# Following is the code to create a progress bar in Python. For that you
# need to install the tqdm package (pip install tqdm).
# Ten updates of 10 units each fill a 100-unit bar.
from time import sleep

from tqdm import tqdm

# Using tqdm as a context manager closes the bar automatically on exit,
# which is equivalent to calling pbar.close() by hand.
with tqdm(total=100) as progress:
    for _ in range(10):
        sleep(0.2)
        progress.update(10)

Generate Logs in Python

 Below is the code which will show how Logging is done in Python: import logging import time class Logger: def __init__ ( self , logger , file_level=logging.info): self .logger = logging.getLogger(logger) self .logger.setLevel(logging.INFO) """ self.logger.setLevel(logging.DEBUG) self.logger.setLevel(logging.INFO) self.logger.setLevel(logging.WARNING) self.logger.setLevel(logging.ERROR) self.logger.setLevel(logging.CRITICAL) """ fmt = logging.Formatter( '%(asctime)s - %(filename)s:[%(lineno)s] - [%(levelname)s] - %(message)s' ) curr_time = time.strftime( "%Y-%m-%d" ) self .LogFileName = '. \\ Logs \\ log' + curr_time + '.log' # "a" to append the logs in same file, "w" to generate new logs and delete old one fh = logging.FileHandler( self .LogFileName , mode = "a" ) fh.set...

Excel file operations in Python

 Below is the code which will demonstrate the Excel file operations in Python import openpyxl def getRowCount (path , sheetName): workbook = openpyxl.load_workbook(path) sheet = workbook[sheetName] return sheet.max_row def getColCount (path , sheetName): workbook = openpyxl.load_workbook(path) sheet = workbook[sheetName] return sheet.max_column def getCellData (path , sheetName , rowNum , colNum): workbook = openpyxl.load_workbook(path) sheet = workbook[sheetName] return sheet.cell( row =rowNum , column =colNum).value def setCellData (path , sheetName , rowNum , colNum , data): workbook = openpyxl.load_workbook(path) sheet = workbook[sheetName] sheet.cell( row =rowNum , column =colNum).value = data workbook.save(path) path = "..//excel//testdata.xlsx" sheetName = "LoginTest" rows = getRowCount(path , sheetName) cols = getColCount(path , sheetName) print (rows , "---" , cols) print (getCellData(path , shee...

Read Excel file data in Python

# Below is the code to read Excel file data in Python (requires the
# third-party openpyxl package: pip install openpyxl).
import openpyxl


def get_data(sheetName, path="..//excel//testdata.xlsx"):
    """Read every data row of *sheetName* into a list of row lists.

    Row 1 is treated as the header and skipped; rows 2..max_row are
    returned, each as a list of that row's cell values across all
    columns.

    Args:
        sheetName: Name of the worksheet to read.
        path: Workbook file to open. Defaults to the location that was
            hard-coded in the original, so existing callers keep
            working while new callers can point at any workbook.

    Returns:
        list[list]: one inner list per data row.
    """
    workbook = openpyxl.load_workbook(path)
    sheet = workbook[sheetName]
    # The original used list.insert(i, ...) with ever-growing indexes,
    # which on a shorter list is just an append; a nested comprehension
    # builds the identical structure directly.
    return [
        [sheet.cell(row=r, column=c).value for c in range(1, sheet.max_column + 1)]
        for r in range(2, sheet.max_row + 1)
    ]

SFTP connection in Python

# SFTP connection in Python using the third-party paramiko package
# (pip install paramiko): open an SSH session, list the remote
# directory over SFTP, then close everything.
import paramiko

# remote server credentials (placeholders -- fill in before running)
host = "host name here"
username = "username here"
password = "password here"
port = PORT_NUMBER  # NOTE(review): placeholder, must be a real integer port

# create ssh client
client = paramiko.SSHClient()
# Auto-accept unknown host keys (convenient for demos; skips host verification).
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=host, port=port, username=username, password=password)
print('connection established successfully')

sftp = client.open_sftp()
files = sftp.listdir()
print("Listing all the files and Directory: ", files)

# close the connection
sftp.close()
client.close()

Files I/O operations in Python

# Below is the code for files I/O operations in Python.
import os
import sys

# BUG FIX: the original called sys.path.extend(<str>). extend() iterates
# its argument, so a bare string added EVERY CHARACTER as a separate
# sys.path entry instead of the one directory. append() adds the single
# path as intended.
sys.path.append(r'mention your local path here')

print('Get current working directory : ', os.getcwd())
print('File name : ', os.path.basename(__file__))
print('Directory Name: ', os.path.dirname(__file__))
print('Absolute path of file: ', os.path.abspath(__file__))
print('Absolute directory name: ', os.path.dirname(os.path.abspath(__file__)))

# Specify the directory you want to list
directory_path = os.getcwd()

# List all files and folders in the directory
file_list = os.listdir(directory_path)

# Get full paths
full_paths = [os.path.join(directory_path, file) for file in file_list]
print(full_paths)

Web Scraping in Python

 Here is the code to show how to do Web Scraping in Python import requests from lxml import html from bs4 import BeautifulSoup import csv import pandas as pd from openpyxl import Workbook from openpyxl.utils import get_column_letter from openpyxl.utils.dataframe import dataframe_to_rows from openpyxl.worksheet.table import Table , TableStyleInfo from datetime import datetime page_number = 1 Baseurl = 'https://www.vesselfinder.com' urls = 'https://www.vesselfinder.com/vessels' header = { 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/39.0.2171.95 Safari/537.36' , } # Get HTML Content r = requests.get(urls , headers =header) soup = BeautifulSoup(r.content , 'html.parser' ) page_links = [] table = soup.find( 'table' , class_ = 'results' ) for row in table.tbody.find_all( 'tr' ): # Find all data for each column columns = row...

Web Scraping/Crawling in python using BeautifulSoup

 Below is the code in python to so Web Scraping/Crawling using BeautifulSoup and request library. First you need to install libraries using following commands: 1. pip install BeautifulSoup 2. pip install request Here is the code: import csv from datetime import datetime import random import time import requests import requests from lxml import html from bs4 import BeautifulSoup import pandas as pd import numpy as np counter = 0 d = {} Vessel_record = [] Vessel_info = [] urls = [] url = 'https://www.vesselfinder.com/vessels/details/' file_Imo = "C:\\Users\\vmalge\\PycharmProjects\\vesselInfoAutomation\\InputTestData\\IMO1.xlsx" df = pd.read_excel(file_Imo, index_col=None, na_values=['NA'], usecols="A") # print(df) for i, row in df.iterrows():     for j, column in row.items():         urls.append(url + str(column)) user_agent_list = [     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.457...

Calculate Percentage return for all stocks/share/equity for a certain period - NSE/Indian stock market - Python

 Below is the code in python to Calculate Percentage return for all stocks/share/equity for a certain period - NSE/Indian stock market here is the code import pandas as pd import glob from csv import writer import csv # The data assigned to the list list_data = ['Stock_Symbol', 'Percentage_Return', 'Period'] # Pre-requisite - The CSV file should be manually closed before running this code. # First, open the old CSV file in append mode, hence mentioned as 'a' # Then, for the CSV file, create a file object with open('Result/All_Stocks_Anaysis.csv', 'a',           newline='') as f_object:     # Pass the CSV  file object to the writer() function     writer_object = writer(f_object)     # Result - a writer object     # Pass the data in the list as an argument into the writerow() function     writer_object.writerow(list_data)     # Close the file object     f_object.close() for file i...

Download historical data for all NSE Stocks in Python

Using Python we can download historical data for all NSE stocks for Indian Stock Market. Below are the steps. 1. First download the csv file containing all NSE stock codes. Go to : https://www.nseindia.com/market-data/securities-available-for-trading and click on  Securities available for Equity segment (.csv) to download the file. We use this file to read stock codes to read historical data for them. 2. Install the yfinance package using following command pip install yfinance  3. Below is the code to read all NSE stock historical data: import yfinance as yf import pandas as pd import os NSE_all_stock_codes = pd.read_csv(f'./NSE_Stock_Codes/EQUITY_ALL.csv') print(NSE_all_stock_codes.SYMBOL) for file in os.scandir(f'Historical_Data_All/'):     if file.name.endswith(".csv"):         os.unlink(file.path) for nse_symbol in NSE_all_stock_codes.SYMBOL:     try:         nse_symbol_data = yf.download(f'{nse_symbol}.NS', perio...

How to read a config file in python

# Below is an example which shows how to read a config file in Python.
#
# Sample config file -- copy the text below and save it as config.ini:
#   [basic info]
#   testsiteurl=https://www.google.com/
#   [locators]
#   #page locators
#   Logo_XPATH=//img[@class='logo']
#   Searchbar_XPATH=//*[@id='testbox']
#
# The code that reads it (save as configReader.py):
from configparser import ConfigParser


def readConfig(section, key, path="..\\config.ini"):
    """Return the value stored under *key* in *section* of an INI file.

    Args:
        section: Section header to look in (e.g. "locators").
        key: Option name inside that section.
        path: INI file to read. BUG FIX: the original read
            "..\\conf.ini", which never matches the "config.ini" file
            the accompanying instructions tell you to create; the
            default now points at config.ini, and the new parameter
            lets callers use any path.

    Raises:
        configparser.NoSectionError: If the section is absent -- also
            what happens when the file does not exist, because
            ConfigParser.read() silently skips unreadable files.
    """
    config = ConfigParser()
    config.read(path)
    return config.get(section, key)


if __name__ == "__main__":
    # Demo: print one locator value from the config file.
    print(readConfig("locators", "Logo_XPATH"))

Sample file to install dependencies in python

Below is the sample dependencies list file to install for a Python project. Just copy and paste the text and save the file as requirement.txt: allure-pytest==2.13.2 allure-python-commons==2.13.2 apipkg==1.5 astor==0.8.1 atomicwrites==1.4.0 attrs==20.3.0 certifi==2021.10.8 chardet==4.0.0 colorama==0.4.4 configparser==5.0.2 crayons==0.4.0 execnet==1.8.0 idna==2.10 iniconfig==1.1.1 packaging==20.9 pluggy==0.13.1 py==1.10.0 pyparsing==2.4.7 pytest==7.4.3 pytest-ast-transformer==1.0.3 pytest-forked==1.3.0 pytest-html==4.1.1 pytest-metadata==3.0.0 pytest-soft-assertions==0.1.2 pytest-xdist==2.2.1 requests==2.25.1 selenium==4.16.0 six==1.15.0 texttable==1.6.3 toml==0.10.2 urllib3==1.26.4 webdriver-manager==4.0.1 openpyxl==3.1.2 coloredlogs==15.0.1 Unidecode == 1.3.7 beautifulsoup4 == 4.12.2 selenium-wire == 5.1.0 In order to install all above dependencies, you need to navigate to the path where the requirement.txt file is located. After locating the path you need to simply execute the following command to in...